import itertools
import traceback
import uuid
from functools import partial, reduce
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from pdb import set_trace as st
import numpy as np
import pandas as pd
import os
import tensorflow as tf
from nninst_graph import AttrMap, Graph, GraphAttrKey
import nninst_mode as mode
from dataset import cifar10, mnist  # mnist is used below and assumed to live alongside cifar10 in the dataset package
from dataset.mnist_transforms import *
from dataset.config import MNIST_PATH, CIFAR10_PATH
# from nninst.backend.tensorflow.dataset import imagenet, imagenet_raw
# from nninst.backend.tensorflow.dataset.imagenet_hierarchy import imagenet_class_tree
# from nninst.backend.tensorflow.dataset.imagenet_preprocessing import (
# alexnet_preprocess_image,
# )
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from model import LeNet
from model.resnet18cifar10 import ResNet18Cifar10
from model.resnet10cifar10 import ResNet10Cifar10
# from nninst.backend.tensorflow.model import AlexNet, LeNet, ResNet50
from model.config import ModelConfig
# from nninst.backend.tensorflow.model.config import (
# ALEXNET,
# RESNET_50,
# VGG_16,
# ModelConfig,
# )
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from trace.common import (
reconstruct_stat_from_tf,
reconstruct_trace_from_tf_v2,
)
# from nninst.dataset.envs import IMAGENET_RAW_DIR
from nninst_op import Conv2dOp
from nninst_path import (
get_trace_path_in_fc_layers,
get_trace_path_intersection_in_fc_layers,
)
from nninst_statistics import (
calc_trace_path_num,
calc_trace_size,
calc_trace_size_per_layer,
)
from nninst_trace import (
TraceKey,
compact_edge,
compact_trace,
merge_compact_trace,
merge_compact_trace_diff,
merge_compact_trace_intersect,
)
from nninst_utils import filter_value_not_null, merge_dict
from nninst_utils.fs import CsvIOAction, ImageIOAction, IOAction, abspath
from nninst_utils.numpy import arg_approx, arg_sorted_topk
from nninst_utils.ray import ray_iter
__all__ = [
"clean_overlap_ratio",
"overlap_ratio",
"get_overlay_summary",
"resnet_50_imagenet_overlap_ratio",
"alexnet_imagenet_overlap_ratio",
"resnet_50_imagenet_overlap_ratio_error",
"get_overlay_summary_one_side",
"resnet_50_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5",
"resnet_50_imagenet_overlap_ratio_top5_rand",
"resnet_50_imagenet_overlap_ratio_top5",
"alexnet_imagenet_overlap_ratio_error",
"alexnet_imagenet_overlap_ratio_rand",
"alexnet_imagenet_overlap_ratio_top5_rand",
"alexnet_imagenet_overlap_ratio_top5_diff",
]
def calc_all_overlap(
class_trace: AttrMap,
trace: AttrMap,
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
node_name: str = None,
compact: bool = False,
use_intersect_size: bool = False,
key: str = TraceKey.EDGE,
) -> Dict[str, float]:
if node_name is None:
if use_intersect_size:
overlap_ratio, intersect_size = overlap_fn(
class_trace, trace, key, return_size=True
)
return {key + "_size": intersect_size, key: overlap_ratio}
else:
return {
**{
key + "_size": calc_trace_size(trace, key, compact=compact)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
},
**{
key: overlap_fn(class_trace, trace, key)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
},
}
else:
all_overlap = {
key: overlap_fn(class_trace, trace, key, node_name)
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]
}
for key in [
TraceKey.EDGE,
TraceKey.POINT,
TraceKey.WEIGHT
]:
if node_name in trace.ops:
node_trace = trace.ops[node_name]
if key in node_trace:
if compact:
all_overlap[key + "_size"] = np.count_nonzero(
np.unpackbits(node_trace[key])
)
else:
all_overlap[key + "_size"] = TraceKey.to_array(
node_trace[key]
).size
return all_overlap
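# Hedged usage sketch (not called anywhere in this module): illustrates the
# dictionary layout calc_all_overlap returns when use_intersect_size=True.
# The stub overlap function and the None traces are placeholders; real calls
# pass AttrMap traces and a real overlap function.
def _demo_calc_all_overlap_layout() -> Dict[str, float]:
    def stub_overlap_fn(class_trace, trace, key, return_size=False):
        # A real overlap_fn compares class_trace and trace on the given key;
        # the fixed ratio and intersection size here are for illustration only.
        return (0.75, 42) if return_size else 0.75

    # Returns {f"{TraceKey.EDGE}_size": 42, TraceKey.EDGE: 0.75}.
    return calc_all_overlap(
        class_trace=None,
        trace=None,
        overlap_fn=stub_overlap_fn,
        use_intersect_size=True,
        key=TraceKey.EDGE,
    )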
# Compute the MNIST overlap ratio between the traces of clean test images and class traces
def clean_overlap_ratio(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
    num_gpus: float = 0.2,
images_per_class: int = 1,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath("result/lenet/model_dropout")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
# print(class_id, predicted_label)
# st()
if predicted_label != class_id:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
}
# st()
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
            num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
# Compute the overlap ratio between class traces and the traces of transformed
# (translated, rotated, and scaled) MNIST test images
def translation_overlap_ratio(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
transforms=None,
    name=None,
    num_gpus=0.2,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath("result/lenet/model_augmentation")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
            # Check the prediction on the clean, untransformed image,
            # so no transform is needed here
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
# print(class_id, predicted_label)
# st()
if predicted_label != class_id:
return [{}] if per_node else {}
            # Reconstruct the trace regardless of whether the prediction is correct
trace = reconstruct_trace_from_tf_brute_force(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
)
# row = {
# "image_id": image_id,
# **map_prefix(
# calc_all_overlap(
# class_trace_fn(class_id).load(), trace, overlap_fn
# ),
# "original",
# ),
# }
# st()
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
                for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
acc = len(traces) / (images_per_class * 10)
traces = pd.DataFrame(traces).mean()
traces.loc['accuracy'] = acc
traces = traces.to_frame()
traces.columns = [name]
return traces
return CsvIOAction(path, init_fn=get_overlap_ratio)
# Compute the overlap ratio of each attacked (adversarial) image against its class trace
def attack_overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
num_gpus: float = 0.2,
    model_dir="result/lenet/model_augmentation",
    transforms=None,
    transform_name="noop",
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
nonlocal model_dir
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath(model_dir)
ckpt_dir = f"{model_dir}/ckpts"
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook,
create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=ckpt_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
                # Note: pass model_dir here, not ckpt_dir
                model_dir=model_dir,
                transforms=transforms,
                transform_name=transform_name,
                mode="test",
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=ckpt_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf_brute_force(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
"class_id": class_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
# row = calc_all_overlap(
# class_trace_fn(class_id).load(), adversarial_trace, overlap_fn
# )
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [trace for trace in traces if len(trace) != 0]
# acc = len(traces) / (images_per_class * 10)
# traces = pd.DataFrame(traces).mean()
# traces.loc['clean_accuracy'] = acc
# traces = traces.to_frame()
# traces.columns = [attack_name]
# return traces
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
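# Hedged sketch: the DataFrame produced by attack_overlap_ratio has
# "original.*" and "adversarial.*" columns, which is the shape consumed by
# get_overlay_summary (defined later in this file). The threshold below is
# an arbitrary illustration, not a tuned setting.
def _demo_attack_overlap_to_summary(overlap_df: pd.DataFrame) -> Dict[str, int]:
    return get_overlay_summary(overlap_df, TraceKey.EDGE, threshold=1)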
def lenet_mnist_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
    mode: str,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
data_dir = abspath(MNIST_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
create_model = lambda: LeNet(data_format="channels_first")
if mode == "test":
dataset = mnist.test
elif mode == "train":
dataset = mnist.train
else:
            raise RuntimeError(f"Invalid dataset mode: {mode}")
input = dataset(data_dir,
normed=False,
transforms=transforms,
)
# st()
# input = input.filter(lambda image, label: tf.equal(tf.convert_to_tensor(class_id, dtype=tf.int32), label))
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: dataset(data_dir,
normed=False,
transforms=transforms,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}_{transform_name}"
result_dir = f"{model_dir}/attack/{mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
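# Hedged sketch of the on-disk layout lenet_mnist_example uses; the attack
# name, transform name, class id, and image id below are placeholders for
# illustration, not values produced by this codebase.
def _demo_lenet_mnist_example_path() -> str:
    model_dir = "result/lenet/model_augmentation"
    name = "fgsm_noop"  # f"{attack_name}_{transform_name}"
    result_dir = f"{model_dir}/attack/test/{name}/3"  # mode="test", class_id=3
    return os.path.join(result_dir, "0.pkl")  # image_id=0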
def resnet18_cifar10_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
    dataset_mode: str,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_one_input_from_dataset(dataset):
input = (dataset
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
)
return input
def get_example() -> np.ndarray:
data_dir = abspath(CIFAR10_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
# create_model = lambda: LeNet(data_format="channels_first")
        create_model = lambda: partial(
            ResNet18Cifar10(),
            training=False,
        )
from dataset.cifar10_main import input_fn_for_adversarial_examples
# dataset = input_fn_for_adversarial_examples(
# is_training= False,
# data_dir=data_dir,
# num_parallel_batches=1,
# is_shuffle=False,
# transform_fn=None,
# )
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: get_one_input_from_dataset(
# dataset
# ),
# attack_fn=attack_fn,
# model_dir=ckpt_dir,
# **kwargs,
# )
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: (
input_fn_for_adversarial_examples(
                    is_training=False,
data_dir=data_dir,
num_parallel_batches=1,
is_shuffle=False,
transform_fn=None,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
),
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}_{transform_name}"
result_dir = f"{model_dir}/attack/{dataset_mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def resnet10_cifar10_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
model_dir: str,
    dataset_mode: str,
transform_name: str = "noop",
transforms: Transforms = None,
**kwargs,
) -> IOAction[np.ndarray]:
def get_one_input_from_dataset(dataset):
input = (dataset
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
)
return input
def get_example() -> np.ndarray:
data_dir = abspath(CIFAR10_PATH)
ckpt_dir = f"{model_dir}/ckpts"
ckpt_dir = abspath(ckpt_dir)
# create_model = lambda: LeNet(data_format="channels_first")
        create_model = lambda: partial(
            ResNet10Cifar10(),
            training=False,
        )
from dataset.cifar10_main import input_fn_for_adversarial_examples
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: (
input_fn_for_adversarial_examples(
                    is_training=(dataset_mode == "train"),
data_dir=data_dir,
num_parallel_batches=1,
is_shuffle=False,
transform_fn=None,
)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1)
.make_one_shot_iterator()
.get_next()[0]
),
attack_fn=attack_fn,
model_dir=ckpt_dir,
**kwargs,
)
return adversarial_example
name = f"{attack_name}"
result_dir = f"{model_dir}/attack/{dataset_mode}/{name}/{class_id}"
path = os.path.join(result_dir, f"{image_id}.pkl")
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def adversarial_example_image(
example_io: IOAction[np.ndarray], cache: bool = True
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
example = example_io.load()
if example is None:
return None
return (np.squeeze(example, axis=0) * 255).astype(np.uint8)
path = example_io.path.replace(".pkl", ".png")
return ImageIOAction(path, init_fn=get_example, cache=cache)
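# Hedged sketch: the pixel-range conversion adversarial_example_image applies,
# shown on a random batch-of-one array so the shape handling is explicit. The
# 28x28x1 shape is an assumption for illustration (MNIST-like input).
def _demo_example_to_image() -> np.ndarray:
    example = np.random.random_sample((1, 28, 28, 1)).astype(np.float32)
    image = (np.squeeze(example, axis=0) * 255).astype(np.uint8)
    assert image.shape == (28, 28, 1) and image.dtype == np.uint8
    return image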
def generate_examples(
example_fn: Callable[..., IOAction[np.ndarray]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
transform_name: str = "noop",
transforms = None,
cache: bool = True,
num_gpus=0.2,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_io = example_fn(
attack_name=attack_name,
class_id=class_id,
image_id=image_id,
cache=cache,
                transforms=transforms,
                transform_name=transform_name,
**kwargs,
)
example_io.save()
adversarial_example_image(example_io, cache=cache).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
name = f"{attack_name}_{transform_name}"
print(f"begin {name}, num_gpu={num_gpus}")
if len(image_ids) > 99:
chunksize = 4
else:
chunksize = 1
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=chunksize,
out_of_order=True,
num_gpus=num_gpus,
# huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {name}")
def get_overlay_summary(
overlap_ratios: pd.DataFrame, trace_key: str, threshold=1
) -> Dict[str, int]:
condition_positive = len(overlap_ratios)
if condition_positive == 0:
return {}
original_key = f"original.{trace_key}"
false_positive = np.count_nonzero(overlap_ratios[original_key] < threshold)
adversarial_key = f"adversarial.{trace_key}"
true_positive = np.count_nonzero(overlap_ratios[adversarial_key] < threshold)
predicted_condition_positive = true_positive + false_positive
recall = (true_positive / condition_positive) if condition_positive != 0 else 0
precision = (
(true_positive / predicted_condition_positive)
if predicted_condition_positive != 0
else 0
)
f1 = (2 / ((1 / recall) + (1 / precision))) if recall != 0 and precision != 0 else 0
return dict(
threshold=threshold,
condition_positive=condition_positive,
# predicted_condition_positive=predicted_condition_positive,
original_is_higher=np.count_nonzero(
(overlap_ratios[original_key] - overlap_ratios[adversarial_key]) > 0
),
# adversarial_is_higher=np.count_nonzero(
# (overlap_ratios[adversarial_key] - overlap_ratios[original_key]) > 0),
true_positive=true_positive,
false_positive=false_positive,
recall=recall,
precision=precision,
f1=f1,
)
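# Hedged sketch (synthetic data): how get_overlay_summary turns an overlap
# DataFrame into detection metrics. With threshold=0.6, all three adversarial
# rows fall below the threshold and count as true positives, while the single
# original row below 0.6 counts as a false positive.
def _demo_get_overlay_summary() -> Dict[str, int]:
    synthetic = pd.DataFrame(
        {
            f"original.{TraceKey.EDGE}": [0.9, 0.95, 0.4],
            f"adversarial.{TraceKey.EDGE}": [0.3, 0.5, 0.2],
        }
    )
    return get_overlay_summary(synthetic, TraceKey.EDGE, threshold=0.6)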
def overlap_ratio(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
# class_id = mnist_info.test().label(image_id)
#
# if class_id != trace.attrs[GraphAttrKey.PREDICT]:
# return [{}] if per_node else {}
if trace is None:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label))
# .skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
#
# if adversarial_example is None:
# return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
row = {
"image_id": image_id,
**map_prefix(
calc_all_overlap(
class_trace_fn(class_id).load(), trace, overlap_fn
),
"original",
),
**map_prefix(
calc_all_overlap(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
overlap_fn,
),
"adversarial",
),
}
return row
else:
return {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 100)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"node_name": node_name,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
# traces = ray_iter(get_row, (image_id for image_id in range(300, 350)),
# traces = ray_iter(get_row, (image_id for image_id in range(131, 300)),
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
# for image_id in range(0, 50)
for class_id in range(1, 1001)
),
# for class_id in range(1, 2)),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
# chunksize=1, out_of_order=False, num_gpus=1)
# count = 0
# result = []
# for trace in traces:
# result.append(trace)
# print(count)
# count += 1
# traces = [trace for trace in result if len(trace) != 0]
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
if trace is None:
return {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir, class_id, image_id, normed=False
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
if adversarial_label not in label_top5:
# if np.intersect1d(label_top5, adversarial_label_top5).size == 0:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in label_top5]
)
adversarial_class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in adversarial_label_top5]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_error(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if class_id == trace.attrs[GraphAttrKey.PREDICT]:
return {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 3)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_rand(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
example = np.random.random_sample((1, 224, 224, 3)).astype(np.float32)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5_rand(
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
example = np.random.random_sample((1, 224, 224, 3)).astype(np.float32)
trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = merge_compact_trace(
*[
class_trace_fn(label).load()
for label in trace.attrs[GraphAttrKey.PREDICT_TOP5]
]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
row = {
"image_id": image_id,
"label": class_id,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
return imagenet_example(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
cache=cache,
**kwargs,
)
# deprecated
def alexnet_imagenet_example_trace_old(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
return compact_trace(trace, graph)
name = "alexnet_imagenet"
path = f"store/analysis/example_trace/{name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_example_trace_of_target_class(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
trace_of_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
return compact_trace(trace_of_target_class, graph)
name = "alexnet_imagenet"
path = f"store/analysis/example_trace_of_target_class/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_adversarial_example_trace(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
)[0]
return compact_trace(adversarial_trace, graph)
name = "alexnet_imagenet"
path = f"store/analysis/adversarial_example_trace/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
def alexnet_imagenet_adversarial_example_trace_of_original_class(
attack_name: str, class_id: int, image_id: int, threshold: float
) -> IOAction[AttrMap]:
def get_example() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=None,
generate_adversarial_fn=None,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace_of_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=lambda input: arg_approx(input, threshold),
model_dir=model_dir,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
return compact_trace(adversarial_trace_of_original_class, graph)
name = "alexnet_imagenet"
path = f"store/analysis/adversarial_example_trace_of_original_class/{name}/attack={attack_name}/threshold={threshold:.3f}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=True, compress=True)
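# Hedged note: the select_seed_fn hooks used above override which output class
# seeds the trace reconstruction; both call sites in this file simply ignore
# their argument and return a fixed class index, e.g.:
def _make_fixed_seed_fn(class_id: int) -> Callable[[np.ndarray], np.ndarray]:
    return lambda _: np.array([class_id])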
def generate_traces(
trace_fn: Callable[..., IOAction[AttrMap]],
attack_name: str,
class_ids: Iterable[int],
image_ids: Iterable[int],
**kwargs,
):
def generate_traces_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
trace_fn(
attack_name=attack_name, class_id=class_id, image_id=image_id, **kwargs
).save()
return class_id, image_id
except Exception:
return class_id, image_id, traceback.format_exc()
results = ray_iter(
generate_traces_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
print(f"finish class {class_id} image {image_id}")
def resnet_50_imagenet_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
return imagenet_example(
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
cache=cache,
**kwargs,
)
def vgg_16_imagenet_example(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
return imagenet_example(
model_config=VGG_16,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
cache=cache,
**kwargs,
)
def imagenet_example(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
cache: bool = True,
**kwargs,
) -> IOAction[np.ndarray]:
def get_example() -> np.ndarray:
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
return adversarial_example
name = f"{model_config.name}_imagenet"
path = f"store/example/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example, cache=cache, compress=True)
def alexnet_imagenet_example_stat(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = None,
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
return imagenet_example_stat(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
def resnet_50_imagenet_example_stat(
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = None,
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
return imagenet_example_stat(
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
def imagenet_example_trace(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
trace_fn,
class_id: int,
image_id: int,
threshold: float,
per_channel: bool = False,
cache: bool = True,
train: bool = False,
**kwargs,
) -> IOAction[AttrMap]:
def get_example_trace() -> AttrMap:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: (imagenet_raw.train if train else imagenet_raw.test)(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return None
if attack_name == "original":
trace = reconstruct_trace_from_tf_v2(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
trace_fn=partial(
trace_fn, select_fn=lambda input: arg_approx(input, threshold)
),
model_dir=model_dir,
)[0]
trace = compact_trace(trace, graph, per_channel=per_channel)
return trace
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_trace_from_tf_v2(
model_fn=model_fn,
input_fn=adversarial_input_fn,
trace_fn=partial(
trace_fn, select_fn=lambda input: arg_approx(input, threshold)
),
model_dir=model_dir,
)[0]
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
return adversarial_trace
name = f"{model_config.name}_imagenet"
if train:
name = f"{name}_train"
if per_channel:
trace_name = "example_channel_trace"
else:
trace_name = "example_trace"
path = f"store/{trace_name}/approx_{threshold:.3f}/{attack_name}/{name}/{class_id}/{image_id}.pkl"
return IOAction(path, init_fn=get_example_trace, cache=cache, compress=True)
# alexnet_imagenet_example_trace = partial(
# imagenet_example_trace,
# model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
# )
#
# resnet_50_imagenet_example_trace = partial(
# imagenet_example_trace, model_config=RESNET_50
# )
#
# vgg_16_imagenet_example_trace = partial(imagenet_example_trace, model_config=VGG_16)
def imagenet_example_stat(
model_config: ModelConfig,
attack_name,
attack_fn,
generate_adversarial_fn,
class_id: int,
image_id: int,
stat_name: str = "avg",
cache: bool = True,
**kwargs,
) -> IOAction[Dict[str, np.ndarray]]:
def get_example_trace() -> Dict[str, np.ndarray]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
# input_fn = lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn = lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
# if predicted_label != class_id:
# return None
if attack_name == "original":
trace = reconstruct_stat_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
model_dir=model_dir,
stat_name=stat_name,
)[0]
return trace
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return None
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return None
adversarial_trace = reconstruct_stat_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
model_dir=model_dir,
stat_name=stat_name,
)[0]
return adversarial_trace
name = f"{model_config.name}_imagenet"
trace_name = "example_stat"
path = (
f"store/{trace_name}/{stat_name}/{attack_name}/{name}/{class_id}/{image_id}.pkl"
)
return IOAction(path, init_fn=get_example_trace, cache=cache, compress=True)
def generate_example_traces(
example_trace_fn: Callable[..., IOAction[AttrMap]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
attack_fn,
generate_adversarial_fn,
threshold: float,
per_channel: bool = False,
cache: bool = True,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
train: bool = False,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
threshold=threshold,
per_channel=per_channel,
cache=cache,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
train=train,
**kwargs,
)
example_trace_io.save()
return class_id, image_id
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def generate_example_stats(
example_trace_fn: Callable[..., IOAction[Dict[str, np.ndarray]]],
class_ids: Iterable[int],
image_ids: Iterable[int],
attack_name: str,
attack_fn,
generate_adversarial_fn,
stat_name: str = None,
cache: bool = True,
**kwargs,
):
def generate_examples_fn(
class_id: int, image_id: int
) -> Union[Tuple[int, int], Tuple[int, int, str]]:
try:
class_id = int(class_id)
image_id = int(image_id)
example_trace_io = example_trace_fn(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
stat_name=stat_name,
cache=cache,
**kwargs,
)
example_trace_io.save()
return class_id, image_id
except Exception as e:
raise e
# return class_id, image_id, traceback.format_exc()
print(f"begin {attack_name}")
results = ray_iter(
generate_examples_fn,
[(class_id, image_id) for image_id in image_ids for class_id in class_ids],
chunksize=1,
out_of_order=True,
num_gpus=0,
huge_task=True,
)
for result in results:
if len(result) == 3:
class_id, image_id, tb = result
print(f"## raise exception from class {class_id}, image {image_id}:")
print(tb)
else:
class_id, image_id = result
# print(f"finish class {class_id} image {image_id}")
print(f"finish {attack_name}")
def alexnet_imagenet_overlap_ratio(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_class_trace_from_tf(
class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
model_dir=model_dir,
select_fn=select_fn,
per_channel=per_channel,
)
if trace is None:
return {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
if class_id != adversarial_label:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = class_trace_fn(class_id).load()
adversarial_class_trace = class_trace_fn(adversarial_label).load()
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
                        if (
                            f"original.{TraceKey.WEIGHT}" in row
                            and row[f"original.{TraceKey.WEIGHT}"] is not None
                        ) or (
                            f"original.{TraceKey.EDGE}" in row
                            and row[f"original.{TraceKey.EDGE}"] is not None
                        ):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def get_predicted_value_contribution(
trace: AttrMap, graph: Graph, class_id: int, create_model, input_fn, model_dir
) -> float:
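    """
    Predicted value of `class_id` when the network is evaluated with its
    weights masked down to the connections contained in `trace`
    (via `MaskWeightWithTraceHook`).
    """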
# print(calc_density_compact(trace, TraceKey.EDGE))
return get_predicted_value(
class_id=class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
prediction_hooks=[MaskWeightWithTraceHook(graph, trace)],
)
def alexnet_imagenet_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
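    """
    For each correctly classified image whose cached adversarial example flips
    the prediction, decompose the original and adversarial traces against the
    class traces of their respective top-5 predictions (see `get_overlap`
    below), recording overlap sizes and predicted-value contributions per rank,
    plus the L2 perturbation norm divided by the input size. Cached as a CSV
    at `path`.
    """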
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
with tf.Session() as sess:
original_example = sess.run(
imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
normed=False,
)
.make_one_shot_iterator()
.get_next()[0]
)
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
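                # Decompose the example trace relative to the class trace of
                # `base_class_id` and the union of the class traces of the other
                # candidate classes ("rest"): each sub-trace below is the part of
                # the example trace that is (not) covered by the base class trace
                # and (not) covered by the rest.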
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
("pvc_in_class_in_rest", example_trace_in_class_in_rest),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
for k, base_class_id in zip(range(1, topk_calc_range + 1), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace, input_fn),
f"original.top{k}",
),
}
for k, base_class_id in zip(
range(1, topk_calc_range + 1), adversarial_label_top5
):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"perturbation": np.linalg.norm(
adversarial_example - original_example
)
/ original_example.size,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_top5_diff_uint8(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
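    """
    Variant of `alexnet_imagenet_overlap_ratio_top5_diff` that analyses the
    quantized adversarial image: the saved uint8 example is reloaded and
    rescaled to [0, 1] float32 before the adversarial trace is reconstructed.
    """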
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = adversarial_example_image(
alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
)
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_example = (
np.expand_dims(adversarial_example, axis=0).astype(np.float32) / 255
)
with tf.Session() as sess:
original_example = sess.run(
imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
normed=False,
)
.make_one_shot_iterator()
.get_next()[0]
)
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
("pvc_in_class_in_rest", example_trace_in_class_in_rest),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
for k, base_class_id in zip(range(1, topk_calc_range + 1), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace, input_fn),
f"original.top{k}",
),
}
for k, base_class_id in zip(
range(1, topk_calc_range + 1), adversarial_label_top5
):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"perturbation": np.linalg.norm(
adversarial_example - original_example
)
/ original_example.size,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_logit_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
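    """
    Compare four traces per example: the original input traced from the true
    label and from the adversarial label, and the adversarial input traced from
    its predicted label and from the original label. Each trace is decomposed
    against the class traces of the corresponding top-5 predictions and
    summarized with overlap sizes and predicted-value contributions. Cached as
    a CSV at `path`.
    """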
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, class_ids: List[int], trace: AttrMap, input_fn
):
rest_class_ids = class_ids.copy()
if base_class_id in rest_class_ids:
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[get_class_trace(class_id) for class_id in rest_class_ids]
)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
# ("pvc_in_class_in_rest", example_trace_in_class_in_rest),
("pvc_in_class", example_trace_in_class),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
# if (class_id not in adversarial_label_top5) or (adversarial_label not in label_top5):
# return [{}] if per_node else {}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, label_top5, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, label_top5, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
adversarial_label_top5,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
topk=topk_share_range,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label_top5,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_ideal_metrics(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
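    """
    "Ideal" setting: the contrast class is the single other label involved in
    the attack (true label vs. adversarial label) rather than the full top-5
    set, for the same four trace combinations as the logit-diff metrics. Also
    records the rank of the original class on the adversarial input and of the
    target class on the original input.
    """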
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
example_trace_share = merge_compact_trace_diff(
trace, example_trace_not_in_class_not_in_rest
)
example_trace_specific = merge_compact_trace_diff(
trace, example_trace_not_in_class_in_rest
)
predicted_value_contributions = {
key: get_predicted_value_contribution(
current_trace,
graph=graph,
class_id=base_class_id,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
for key, current_trace in [
("pvc_total", trace),
("pvc_share", example_trace_share),
("pvc_specific", example_trace_specific),
# ("pvc_in_class_in_rest", example_trace_in_class_in_rest),
("pvc_in_class", example_trace_in_class),
(
"pvc_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
# ("pvc_not_in_class_in_rest", example_trace_not_in_class_in_rest),
# ("pvc_not_in_class_not_in_rest", example_trace_not_in_class_not_in_rest),
]
}
overlap_sizes = {
key: calc_trace_size(current_trace, compact=True)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**predicted_value_contributions,
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
"original_class_rank_in_adversarial_example": get_rank(
class_id=class_id,
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
),
"target_class_rank_in_original_example": get_rank(
class_id=adversarial_label,
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
),
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_fc_layer_path_ideal_metrics(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
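    """
    Path-based variant of the ideal metrics: compare the trace path through the
    fully-connected layers of each example trace with the class trace path,
    reporting path counts at a fixed reference layer instead of edge overlaps.
    """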
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
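            # Reference layer at which FC-layer path counts are reported;
            # the index -11 is tied to AlexNet's layer ordering.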
path_layer_name = graph.layers()[-11]
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
trace = compact_trace(trace, graph, per_channel=per_channel)
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
class_trace_paths = {}
def get_class_trace_path(class_id: int) -> AttrMap:
if class_id not in class_trace_paths:
class_trace = get_class_trace(class_id)
class_trace_paths[class_id] = get_trace_path_in_fc_layers(
graph, class_trace, compact=True
)
return class_trace_paths[class_id]
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_path = get_trace_path_in_fc_layers(
graph, trace, compact=True
)
trace_path_intersection = get_trace_path_intersection_in_fc_layers(
trace, class_trace, graph=graph, compact=True
)
return {
"overlap_size": calc_trace_path_num(
trace_path_intersection, path_layer_name
),
"trace_path_size": calc_trace_path_num(
example_trace_path, path_layer_name
),
"class_trace_path_size": calc_trace_path_num(
get_class_trace_path(base_class_id), path_layer_name
),
}
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, adversarial_trace),
f"adversarial.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, trace_target_class),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_trace_original_class),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_ideal_metrics_per_layer(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
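    """
    Per-layer version of the ideal metrics: for each trace combination (and the
    intersections reported under the `shared.*` prefix), record overlap sizes
    layer by layer in addition to the overall overlap against the contrast
    class.
    """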
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = alexnet_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace, layer_name, compact=True
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
merge_compact_trace_intersect(
trace_target_class, adversarial_trace
),
adversarial_input_fn,
),
f"shared.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
merge_compact_trace_intersect(
adversarial_trace_original_class, trace
),
adversarial_input_fn,
),
f"shared.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_real_metrics_per_layer(rank: int = None, **kwargs):
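    """
    Dispatch to the per-rank pipeline when `rank` is given, otherwise to the v2
    per-layer pipeline, with the AlexNet model configuration.
    """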
return (
imagenet_real_metrics_per_layer_per_rank
if rank
else imagenet_real_metrics_per_layer_v2
)(
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
rank=rank,
**kwargs,
)
def imagenet_real_metrics_per_layer(
model_config: ModelConfig,
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
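    """
    "Real" (non-oracle) setting: only information available at detection time
    is used. Record per-layer overlap sizes between each trace and the class
    trace of its own seed class, for the original input seeded by its predicted
    label and the adversarial input seeded by its prediction; with
    `support_diff`, also trace each input from its second-ranked class.
    """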
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=model_config.class_from_zero,
# preprocessing_fn=model_config.preprocessing_fn)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
# if class_id not in class_traces:
# class_traces[class_id] = class_trace_fn(class_id).load()
# return class_traces[class_id]
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
("overlap_size_in_class", example_trace_in_class),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, adversarial_trace),
f"adversarial.target",
),
}
if support_diff:
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([label_top5[1]]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(label_top5[1], trace_target_class),
f"original.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label_top5[1]]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label_top5[1], adversarial_trace_original_class
),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
images = (
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
)
images = map(
lambda class_with_image: (
class_with_image[0]
if model_config.class_from_zero
else class_with_image[0] + 1,
class_with_image[1],
),
images,
)
traces = ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def imagenet_real_metrics_per_layer_v2(
model_config: ModelConfig,
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
use_weight: bool = False,
support_diff: bool = True,
threshold: float = None,
**kwargs,
):
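    """
    Same protocol as `imagenet_real_metrics_per_layer`, but the original and
    adversarial traces are loaded from the cached per-example traces
    (`imagenet_example_trace`, keyed by `threshold`) instead of being
    reconstructed here; only the optional second-ranked traces are still
    reconstructed on the fly.
    """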
def get_overlap_ratio() -> pd.DataFrame:
        def get_row(
            class_id: int, image_id: int
        ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
assert threshold is not None
trace = imagenet_example_trace(
model_config=model_config,
attack_name="original",
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
threshold=threshold,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
).load()
if trace is None:
return [{}] if per_node else {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = imagenet_example_trace(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
threshold=threshold,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
).load()
if adversarial_trace is None:
return [{}] if per_node else {}
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
# if class_id not in class_traces:
# class_traces[class_id] = class_trace_fn(class_id).load()
# return class_traces[class_id]
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
("overlap_size_in_class", example_trace_in_class),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
row = {
**row,
**map_prefix(
get_overlap(adversarial_label, adversarial_trace),
f"adversarial.target",
),
}
if support_diff:
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([label_top5[1]]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(label_top5[1], trace_target_class),
f"original.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label_top5[1]]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label_top5[1], adversarial_trace_original_class
),
f"adversarial.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
images = (
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
)
images = map(
lambda class_with_image: (
class_with_image[0]
if model_config.class_from_zero
else class_with_image[0] + 1,
class_with_image[1],
),
images,
)
traces = ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def imagenet_real_metrics_per_layer_per_rank(
model_config: ModelConfig,
attack_name: str,
attack_fn,
generate_adversarial_fn,
trace_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
path: str,
rank: int,
use_weight: bool = False,
threshold: float = None,
use_point: bool = False,
per_channel: bool = False,
**kwargs,
):
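    """
    Trace each input from its `rank`-th ranked prediction (the clean input when
    `attack_name == "normal"`, otherwise the cached adversarial example) and
    record, per layer, the total trace size and its overlap with the class
    trace of that seed class. One image per ImageNet class is processed.
    """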
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
assert threshold is not None
if attack_name == "normal":
trace = reconstruct_trace_from_tf_v2(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
trace_fn=partial(
trace_fn,
select_seed_fn=lambda output: arg_sorted_topk(output, rank)[
rank - 1 : rank
],
),
model_dir=model_dir,
rank=rank,
)[0]
else:
adversarial_example = imagenet_example(
model_config=model_config,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return {}
adversarial_input_fn = lambda: tf.data.Dataset.from_tensors(
model_config.normalize_fn(adversarial_example)
)
trace = reconstruct_trace_from_tf_v2(
model_fn=model_fn,
input_fn=adversarial_input_fn,
trace_fn=partial(
trace_fn,
select_seed_fn=lambda output: arg_sorted_topk(output, rank)[
rank - 1 : rank
],
),
model_dir=model_dir,
rank=rank,
)[0]
if trace is None:
return {}
label = trace.attrs[GraphAttrKey.SEED]
def get_class_trace(class_id: int) -> AttrMap:
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
if use_point:
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
graph.op(graph.id(layer_name))
.output_nodes[0]
.name,
compact=True,
key=TraceKey.POINT,
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class",
example_trace_in_class,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
else:
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class",
example_trace_in_class,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
trace = compact_trace(trace, graph, per_channel=per_channel)
row = {}
row = {**row, **get_overlap(label, trace)}
row = {"class_id": class_id, "image_id": image_id, "label": label, **row}
# print(row)
return row
images = (
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
)
images = map(
lambda class_with_image: (
class_with_image[0]
if model_config.class_from_zero
else class_with_image[0] + 1,
class_with_image[1],
),
images,
)
traces = list(
ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
)
assert len(traces) == 1000
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces).sort_values(by=["class_id", "image_id"])
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_real_metrics_per_layer(rank: int = None, **kwargs):
return (
imagenet_real_metrics_per_layer_per_rank
if rank
else imagenet_real_metrics_per_layer_v2
)(model_config=RESNET_50, rank=rank, **kwargs)
def vgg_16_imagenet_real_metrics_per_layer(rank: int = None, **kwargs):
return (
imagenet_real_metrics_per_layer_per_rank
if rank
else imagenet_real_metrics_per_layer_v2
)(model_config=VGG_16, rank=rank, **kwargs)
def alexnet_imagenet_real_metrics_per_layer_targeted(target_class: int):
def metrics_fn(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
return imagenet_real_metrics_per_layer_targeted(
target_class=target_class,
model_config=ALEXNET.with_model_dir("tf/alexnet/model_import"),
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_trace_fn=class_trace_fn,
select_fn=select_fn,
path=path,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
per_node=per_node,
per_channel=per_channel,
use_weight=use_weight,
support_diff=support_diff,
**kwargs,
)
return metrics_fn
def resnet_50_imagenet_real_metrics_per_layer_targeted(target_class: int):
def metrics_fn(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
return imagenet_real_metrics_per_layer_targeted(
target_class=target_class,
model_config=RESNET_50,
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_trace_fn=class_trace_fn,
select_fn=select_fn,
path=path,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
per_node=per_node,
per_channel=per_channel,
use_weight=use_weight,
support_diff=support_diff,
**kwargs,
)
return metrics_fn
def imagenet_real_metrics_per_layer_targeted(
target_class: int,
model_config: ModelConfig,
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
path: str,
select_seed_fn: Callable[[np.ndarray], np.ndarray] = None,
entry_points: List[int] = None,
per_node: bool = False,
per_channel: bool = False,
use_weight: bool = False,
support_diff: bool = True,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath(model_config.model_dir)
create_model = lambda: model_config.network_class()
graph = model_config.network_class.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
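            # image_id == -1 means "use the first image of this class that the model
            # classifies correctly"; otherwise use the requested image and skip the
            # example if the prediction is wrong.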
if image_id == -1:
image_id = 0
while True:
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
try:
predicted_label = predict(
create_model=create_model,
input_fn=input_fn,
model_dir=model_dir,
)
if predicted_label != class_id:
image_id += 1
else:
break
except IndexError:
return [{}] if per_node else {}
else:
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=model_config.class_from_zero,
preprocessing_fn=model_config.preprocessing_fn,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=select_seed_fn,
entry_points=entry_points,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
trace = compact_trace(trace, graph, per_channel=per_channel)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
# if class_id not in class_traces:
# class_traces[class_id] = class_trace_fn(class_id).load()
# return class_traces[class_id]
return class_trace_fn(class_id).load()
def get_overlap(base_class_id: int, trace: AttrMap):
class_trace = get_class_trace(base_class_id)
example_trace_in_class = merge_compact_trace_intersect(
class_trace, trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace,
layer_name,
compact=True,
key=TraceKey.WEIGHT
if use_weight
else TraceKey.EDGE,
)
for key, current_trace in [
("overlap_size_total", trace),
("overlap_size_in_class", example_trace_in_class),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return overlap_sizes
row = {}
row = {
**row,
**map_prefix(get_overlap(class_id, trace), f"original.origin"),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([target_class]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(label_top5[1], trace_target_class), f"original.target"
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"label_top5": label_top5,
"label_top5_value": label_top5_value,
"label_value": label_top5_value[0],
**row,
}
print(row)
return row
images = [(target_class, image_id) for image_id in range(0, 40)] + [
(class_id, -1) for class_id in range(0, 1000) if class_id != target_class
]
images = map(
lambda class_with_image: (
class_with_image[0]
if model_config.class_from_zero
else class_with_image[0] + 1,
class_with_image[1],
),
images,
)
traces = ray_iter(get_row, images, chunksize=1, out_of_order=True, num_gpus=0)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_negative_example_ideal_metrics_per_layer(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
topk_share_range: int = 5,
topk_calc_range: int = 5,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
predicted_label = predict(
create_model=create_model, input_fn=input_fn, model_dir=model_dir
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
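            # Instead of a crafted adversarial example, use a later image of the same
            # class as a "negative example": scan forward until the model misclassifies
            # one; for the *_top5 / *_out_of_top5 variants the true class must stay
            # inside / fall outside the top-5, respectively.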
adversarial_image_id = image_id + 1
while True:
adversarial_input_fn = lambda: imagenet_raw.test(
data_dir,
class_id,
adversarial_image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
try:
adversarial_predicted_label_rank = get_rank(
class_id=predicted_label,
create_model=create_model,
input_fn=adversarial_input_fn,
model_dir=model_dir,
)
except IndexError:
return [{}] if per_node else {}
if adversarial_predicted_label_rank == 0:
adversarial_image_id += 1
else:
if attack_name == "negative_example":
stop = True
elif attack_name == "negative_example_top5":
if adversarial_predicted_label_rank < 5:
stop = True
else:
stop = False
elif attack_name == "negative_example_out_of_top5":
if adversarial_predicted_label_rank >= 5:
stop = True
else:
stop = False
else:
raise RuntimeError()
if stop:
break
else:
adversarial_image_id += 1
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
# return class_trace_fn(class_id).load()
def get_overlap(
base_class_id: int, rest_class_id: int, trace: AttrMap, input_fn
):
rest_class_trace = get_class_trace(rest_class_id)
class_trace = get_class_trace(base_class_id)
class_specific_trace = merge_compact_trace_diff(
class_trace, rest_class_trace
)
example_specific_trace = merge_compact_trace_diff(
trace, rest_class_trace
)
example_trace_in_class_in_rest = merge_compact_trace_intersect(
class_trace, trace, rest_class_trace
)
example_trace_in_class_not_in_rest = merge_compact_trace_intersect(
class_specific_trace, example_specific_trace
)
example_trace_not_in_class_in_rest = merge_compact_trace_diff(
merge_compact_trace_intersect(trace, rest_class_trace), class_trace
)
example_trace_not_in_class_not_in_rest = merge_compact_trace_diff(
example_specific_trace, class_specific_trace
)
overlap_sizes = merge_dict(
*[
filter_value_not_null(
{
f"{layer_name}.{key}": calc_trace_size_per_layer(
current_trace, layer_name, compact=True
)
for key, current_trace in [
("overlap_size_total", trace),
(
"overlap_size_in_class_in_rest",
example_trace_in_class_in_rest,
),
(
"overlap_size_in_class_not_in_rest",
example_trace_in_class_not_in_rest,
),
(
"overlap_size_not_in_class_in_rest",
example_trace_not_in_class_in_rest,
),
(
"overlap_size_not_in_class_not_in_rest",
example_trace_not_in_class_not_in_rest,
),
]
}
)
for layer_name in graph.ops_in_layers()
]
)
return {
**calc_all_overlap(
class_specific_trace,
example_specific_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
),
**overlap_sizes,
}
row = {}
row = {
**row,
**map_prefix(
get_overlap(class_id, adversarial_label, trace, input_fn),
f"original.origin",
),
}
trace_target_class = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([adversarial_label]),
)[0]
trace_target_class = compact_trace(
trace_target_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label, class_id, trace_target_class, input_fn
),
f"original.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
adversarial_trace,
adversarial_input_fn,
),
f"adversarial.target",
),
}
row = {
**row,
**map_prefix(
get_overlap(
adversarial_label,
class_id,
merge_compact_trace_intersect(
trace_target_class, adversarial_trace
),
adversarial_input_fn,
),
f"shared.target",
),
}
adversarial_trace_original_class = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=adversarial_input_fn,
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
select_seed_fn=lambda _: np.array([class_id]),
)[0]
adversarial_trace_original_class = compact_trace(
adversarial_trace_original_class, graph, per_channel=per_channel
)
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
adversarial_trace_original_class,
adversarial_input_fn,
),
f"adversarial.origin",
),
}
row = {
**row,
**map_prefix(
get_overlap(
class_id,
adversarial_label,
merge_compact_trace_intersect(
adversarial_trace_original_class, trace
),
adversarial_input_fn,
),
f"shared.origin",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
"label_value": label_top5_value[0],
"adversarial_label_value": adversarial_label_top5_value[0],
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_top5_unique(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn=lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
input_fn=lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
# adversarial_example = alexnet_imagenet_example(
# attack_name=attack_name,
# attack_fn=attack_fn,
# generate_adversarial_fn=generate_adversarial_fn,
# class_id=class_id,
# image_id=image_id,
# ).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id,
input_fn=lambda: imagenet_raw.train(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
class_traces = {}
def get_class_trace(class_id: int) -> AttrMap:
if class_id not in class_traces:
class_traces[class_id] = class_trace_fn(class_id).load()
return class_traces[class_id]
def get_overlap(base_class_id: int, class_ids: List[int], trace: AttrMap):
class_trace = get_class_trace(base_class_id)
return calc_all_overlap(
trace,
class_trace,
overlap_fn,
compact=True,
use_intersect_size=True,
key=TraceKey.WEIGHT,
# key=TraceKey.EDGE,
)
row = {}
for k, base_class_id in zip(range(1, 6), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace),
f"original.top{k}",
),
}
for k, base_class_id in zip(range(1, 6), adversarial_label_top5):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id, adversarial_label_top5, adversarial_trace
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def resnet_50_imagenet_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/resnet-50-v2/model")
create_model = lambda: ResNet50()
graph = ResNet50.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id, normed=False,
# class_from_zero=True, preprocessing_fn=alexnet_preprocess_image)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = resnet_50_imagenet_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(data_dir, class_id, image_id),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
assert trace is not None
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
assert class_id != adversarial_label
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
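            # Overlap is computed on class-specific traces: the union of the other
            # top-k class traces is subtracted from both the base class trace and the
            # example trace before calling calc_all_overlap.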
def get_overlap(base_class_id: int, class_ids: List[int], trace: AttrMap):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[class_trace_fn(class_id).load() for class_id in rest_class_ids]
)
class_trace = merge_compact_trace_diff(
class_trace_fn(base_class_id).load(), rest_class_trace
)
trace = merge_compact_trace_diff(trace, rest_class_trace)
return calc_all_overlap(
class_trace,
trace,
overlap_fn,
compact=True,
use_intersect_size=True,
)
row = {}
for k, base_class_id in zip(range(1, 3), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace),
f"original.top{k}",
),
}
for k, base_class_id in zip(range(1, 3), adversarial_label_top5):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id, adversarial_label_top5, adversarial_trace
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(1, 1001)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def lenet_mnist_overlap_ratio_top5_diff(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = abspath("/home/yxqiu/data/mnist/raw")
model_dir = abspath("tf/lenet/model_early")
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=model_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
# adversarial_example = generate_adversarial_fn(
# label=class_id,
# create_model=create_model,
# input_fn=lambda: mnist.test(data_dir, normed=False)
# .filter(lambda image, label:
# tf.equal(
# tf.convert_to_tensor(class_id, dtype=tf.int32),
# label)).skip(image_id).take(1).batch(1)
# .make_one_shot_iterator().get_next()[0],
# attack_fn=attack_fn,
# model_dir=model_dir,
# **kwargs,
# )
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=model_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
label_top5_value = trace.attrs[GraphAttrKey.PREDICT_TOP5_VALUE]
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_label_top5_value = adversarial_trace.attrs[
GraphAttrKey.PREDICT_TOP5_VALUE
]
if class_id == adversarial_label:
return [{}] if per_node else {}
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
assert (
class_id == label_top5[0]
and adversarial_label == adversarial_label_top5[0]
)
trace = compact_trace(trace, graph, per_channel=per_channel)
adversarial_trace = compact_trace(
adversarial_trace, graph, per_channel=per_channel
)
def get_overlap(base_class_id: int, class_ids: List[int], trace: AttrMap):
rest_class_ids = class_ids.copy()
rest_class_ids.remove(base_class_id)
rest_class_trace = merge_compact_trace(
*[class_trace_fn(class_id).load() for class_id in rest_class_ids]
)
class_trace = merge_compact_trace_diff(
class_trace_fn(base_class_id).load(), rest_class_trace
)
trace = merge_compact_trace_diff(trace, rest_class_trace)
return calc_all_overlap(
class_trace,
trace,
overlap_fn,
compact=True,
use_intersect_size=True,
)
row = {}
for k, base_class_id in zip(range(1, 3), label_top5):
row = {
**row,
**map_prefix(
get_overlap(base_class_id, label_top5, trace),
f"original.top{k}",
),
}
for k, base_class_id in zip(range(1, 3), adversarial_label_top5):
row = {
**row,
**map_prefix(
get_overlap(
base_class_id, adversarial_label_top5, adversarial_trace
),
f"adversarial.top{k}",
),
}
if per_node:
raise RuntimeError()
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
"label_top5_value": label_top5_value,
"adversarial_label_top5_value": adversarial_label_top5_value,
**row,
}
print(row)
return row
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 100)
for class_id in range(0, 10)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def alexnet_imagenet_overlap_ratio_top5(
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_node: bool = False,
per_channel: bool = False,
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
mode.check(False)
data_dir = IMAGENET_RAW_DIR
model_dir = abspath("tf/alexnet/model_import")
create_model = lambda: AlexNet()
graph = AlexNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook, create_model=create_model, graph=graph
)
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
if trace is None:
return {}
label_top5 = trace.attrs[GraphAttrKey.PREDICT_TOP5]
adversarial_example = generate_adversarial_fn(
label=class_id,
create_model=create_model,
input_fn=lambda: imagenet_raw.test(
data_dir,
class_id,
image_id,
normed=False,
class_from_zero=True,
preprocessing_fn=alexnet_preprocess_image,
)
.make_one_shot_iterator()
.get_next()[0],
attack_fn=attack_fn,
model_dir=model_dir,
**kwargs,
)
if adversarial_example is None:
return {}
adversarial_trace = reconstruct_trace_from_tf(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
imagenet.normalize_alexnet(adversarial_example)
),
select_fn=select_fn,
model_dir=model_dir,
top_5=True,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
adversarial_label_top5 = adversarial_trace.attrs[GraphAttrKey.PREDICT_TOP5]
if adversarial_label not in label_top5:
# if np.intersect1d(label_top5, adversarial_label_top5).size == 0:
def map_prefix(map: Dict[str, Any], prefix: str) -> Dict[str, Any]:
return {f"{prefix}.{key}": value for key, value in map.items()}
class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in label_top5]
)
adversarial_class_trace = merge_compact_trace(
*[class_trace_fn(label).load() for label in adversarial_label_top5]
)
trace = compact_edge(trace, graph, per_channel=per_channel)
adversarial_trace = compact_edge(
adversarial_trace, graph, per_channel=per_channel
)
if per_node:
rows = []
for node_name in class_trace.nodes:
row = {
"image_id": image_id,
"node_name": node_name,
"label": class_id,
"adversarial_label": adversarial_label,
**map_prefix(
calc_all_overlap(
class_trace, trace, overlap_fn, node_name
),
"original",
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace,
adversarial_trace,
overlap_fn,
node_name,
),
"adversarial",
),
}
if (
row[f"original.{TraceKey.WEIGHT}"] is not None
or row[f"original.{TraceKey.EDGE}"] is not None
):
rows.append(row)
return rows
else:
row = {
"image_id": image_id,
"label": class_id,
"adversarial_label": adversarial_label,
"label_top5": label_top5,
"adversarial_label_top5": adversarial_label_top5,
**map_prefix(
calc_all_overlap(class_trace, trace, overlap_fn), "original"
),
**map_prefix(
calc_all_overlap(
adversarial_class_trace, adversarial_trace, overlap_fn
),
"adversarial",
),
}
print(row)
return row
else:
return [{}] if per_node else {}
traces = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, 1)
for class_id in range(0, 1000)
),
chunksize=1,
out_of_order=True,
num_gpus=0,
)
if per_node:
traces = list(itertools.chain.from_iterable(traces))
traces = [trace for trace in traces if len(trace) != 0]
        return pd.DataFrame(traces)
    return CsvIOAction(path, init_fn=get_overlap_ratio)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Derek on 2021/10/26
"""
-------------------------------------------------
File Name : main
Description : Extract bank and market cap data from the JSON file bank_market_cap.json,
Transform the market cap currency using the exchange rate data,
Load the transformed data into a seperated CSV
Author : derek
Email : <EMAIL>
-------------------------------------------------
Change Activity:
2021/10/26: create
-------------------------------------------------
"""
__author__ = 'derek'
import glob
import pandas as pd
from datetime import datetime
def extract_data_from_json(jsonfile):
    dataframe = pd.read_json(jsonfile)
    return dataframe
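# Illustrative sketch of the transform and load steps from the module docstring;
# the market-cap column name and the single exchange-rate float are assumptions,
# not taken from the original project.
def transform(dataframe, exchange_rate):
    """Convert the market cap column from USD to the target currency."""
    dataframe['Market Cap (US$ Billion)'] = round(
        dataframe['Market Cap (US$ Billion)'] * exchange_rate, 3)
    return dataframe


def load_data(dataframe, csvfile):
    """Write the transformed data to a separate CSV file without the index."""
    dataframe.to_csv(csvfile, index=False)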
#!/usr/bin/env python
import os
import sys
import h5py
import logging
import traceback
import warnings
import numpy as np
import scipy.cluster.hierarchy
import scipy.spatial.distance as ssd
from collections import defaultdict
import inStrain.SNVprofile
import inStrain.readComparer
import inStrain.profile.profile_utilities
import matplotlib
matplotlib.use('Agg')
import matplotlib.ticker as ticker
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import seaborn as sns
import drep.d_cluster
import drep.d_analyze
matplotlib.rcParams['pdf.fonttype'] = 42
def mm_plot(db, left_val='breadth', right_val='coverage', title='',\
minANI=0.9):
'''
The input db for this is "mm_genome_info" from "makeGenomeWide" in genomeUtilities.py
'''
db = db.sort_values('ANI_level')
sns.set_style('white')
# breadth
fig, ax1 = plt.subplots()
ax1.plot(db['ANI_level'], db[left_val], ls='-', color='blue')
if left_val == 'breadth':
ax1.plot(db['ANI_level'], estimate_breadth(db['coverage']), ls='--', color='lightblue')
ax1.set_ylabel(left_val, color='blue')
ax1.set_xlabel('Minimum read ANI level')
ax1.set_ylim(0,1)
# coverage
ax2 = ax1.twinx()
ax2.plot(db['ANI_level'], db[right_val], ls='-', color='red')
ax2.set_ylabel(right_val, color='red')
ax2.set_ylim(0,)
# asthetics
plt.xlim(1, max(minANI, db['ANI_level'].min()))
plt.title(title)
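# Typical use (hypothetical dataframe name): mm_plot(gdb, left_val='breadth',
# right_val='coverage', title='genome_1') draws breadth on the left axis and
# coverage on the right axis against the minimum read ANI level.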
def estimate_breadth(coverage):
'''
Estimate breadth based on coverage
    Based on the function breadth = 1 - e^(-0.883 * coverage)
'''
return (-1) * np.exp(-1 * ((0.883) * coverage)) + 1
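# Rough reference values: estimate_breadth(0) ~ 0.0, estimate_breadth(1) ~ 0.59,
# estimate_breadth(5) ~ 0.99; breadth saturates towards 1 as coverage grows.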
# def genome_wide_plot(IS_locs, scaffolds, what='coverage', ANI_levels=[100, 98, 0], window_len=1000):
# '''
# Arguments:
# IS_locs = list of IS objects
# scaffolds = list of scaffolds to profile and plot (in order)
# Keyword arguments:
# ANI_levels = list of ANI levesl to plot
# window_len = length of each window to profile
# '''
# if what == 'coverage':
# item = 'covT'
# elif what == 'clonality':
# item = 'clonT'
# # Load coverages for the scaffolds at each ANI level
# dbs = []
# for IS_loc in IS_locs:
# IS = inStrain.SNVprofile.SNVprofile(IS_loc)
# if what in ['coverage', 'clonality']:
# wdb, breaks = load_windowed_coverage(IS, scaffolds, window_len=window_len, ANI_levels=ANI_levels, item=item)
# elif what in ['linkage']:
# wdb, breaks = load_windowed_linkage(IS, scaffolds, window_len=window_len, ANI_levels=ANI_levels)
# elif what in ['snp_density']:
# wdb, breaks = load_windowed_SNP_density(IS, scaffolds, window_len=window_len, ANI_levels=ANI_levels)
# wdb['name'] = os.path.basename(IS_loc)
# dbs.append(wdb)
# Wdb = pd.concat(dbs, sort=True)
# # Make the plot
# multiple_coverage_plot(Wdb, breaks, thing=what)
# return Wdb, breaks
def load_windowed_metrics(scaffolds, s2l, rLen, metrics=None, window_len=None, ANI_levels=[0, 100],
min_scaff_len=0, report_midpoints=False, covTs=False, clonTs=False,
raw_linkage_table=False, cumulative_snv_table=False):
if metrics is None:
metrics = ['coverage', 'nucl_diversity', 'linkage', 'snp_density']
if type(metrics) != type([]):
print("Metrics must be a list")
return
# Figure out the MMs needed
#rLen = IS.get_read_length()
mms = [_get_mm(None, ANI, rLen=rLen) for ANI in ANI_levels]
# Sort the scaffolds
#s2l = IS.get('scaffold2length')
scaffolds = sorted(scaffolds, key=s2l.get, reverse=True)
if min_scaff_len > 0:
scaffolds = [s for s in scaffolds if s2l[s] >= min_scaff_len]
# Figure out the window length
if window_len == None:
window_len = int(sum([s2l[s] for s in scaffolds]) / 100)
else:
window_len = int(window_len)
# Calculate the breaks
breaks = []
midpoints = {}
tally = 0
for scaffold in scaffolds:
midpoints[scaffold] = tally + int(s2l[scaffold] / 2)
tally += s2l[scaffold]
breaks.append(tally)
dbs = []
if 'coverage' in metrics:
if covTs == False:
logging.error("need covTs for coverage")
raise Exception
cdb = load_windowed_coverage_or_clonality('coverage', covTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'coverage'
dbs.append(cdb)
# if 'clonality' in metrics:
# cdb = load_windowed_coverage_or_clonality(IS, 'clonality', scaffolds, window_len, mms, ANI_levels, s2l)
# cdb['metric'] = 'clonality'
# dbs.append(cdb)
if 'nucl_diversity' in metrics:
if clonTs == False:
logging.error("need clonTs for microdiversity")
raise Exception
cdb = load_windowed_coverage_or_clonality('nucl_diversity', clonTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'nucl_diversity'
dbs.append(cdb)
if 'linkage' in metrics:
if raw_linkage_table is False:
logging.error("need raw_linkage_table for linkage")
raise Exception
cdb = load_windowed_linkage(raw_linkage_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'linkage'
dbs.append(cdb)
if 'snp_density' in metrics:
if cumulative_snv_table is False:
logging.error("need cumulative_snv_table for snp_density")
raise Exception
if len(cumulative_snv_table) > 0:
cdb = load_windowed_SNP_density(cumulative_snv_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'snp_density'
dbs.append(cdb)
if len(dbs) > 0:
Wdb = pd.concat(dbs, sort=True)
Wdb = Wdb.rename(columns={'avg_cov':'value'})
else:
Wdb = pd.DataFrame()
# Add blanks at the breaks
table = defaultdict(list)
for mm, ani in zip(mms, ANI_levels):
for metric in Wdb['metric'].unique():
for bre in breaks:
table['scaffold'].append('break')
table['mm'].append(mm)
table['ANI'].append(ani)
                table['adjusted_start'].append(bre)  # NaN rows at scaffold breaks keep plots from joining values across scaffolds
table['adjusted_end'].append(bre)
table['value'].append(np.nan)
table['metric'].append(metric)
bdb = pd.DataFrame(table)
Wdb = pd.concat([Wdb, bdb], sort=False)
if len(Wdb) > 0:
Wdb.loc[:,'midpoint'] = [np.mean([x, y]) for x, y in zip(Wdb['adjusted_start'], Wdb['adjusted_end'])]
Wdb = Wdb.sort_values(['metric', 'mm', 'midpoint', 'scaffold'])
if report_midpoints:
return Wdb, breaks, midpoints
else:
return Wdb, breaks
def load_windowed_coverage_or_clonality(thing, covTs, scaffolds, window_len, mms, ANI_levels, s2l):
'''
Get the windowed coverage
Pass in a clonTs for microdiversity and covTs for coverage
'''
if thing == 'coverage':
item = 'covT'
elif thing == 'nucl_diversity':
item = 'clonT'
else:
print("idk what {0} is".format(thing))
return
# Get the covTs
#covTs = IS.get(item, scaffolds=scaffolds)
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in covTs:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
covT = covTs[scaffold]
for mm, ani in zip(mms, ANI_levels):
if item == 'covT':
cov = inStrain.profile.profile_utilities.mm_counts_to_counts_shrunk(covT, mm)
if len(cov) == 0:
continue
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold])
elif item == 'clonT':
cov = _get_basewise_clons3(covT, mm)
if len(cov) == 0:
continue
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len=False)
db.loc[:,'avg_cov'] = [1 - x if x == x else x for x in db['avg_cov']]
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db.loc[:,'adjusted_start'] = db['start'] + tally
db.loc[:,'adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = pd.concat(dbs)
else:
Wdb = pd.DataFrame()
return Wdb#, breaks
def load_windowed_linkage(Ldb, scaffolds, window_len, mms, ANI_levels, s2l, on='r2'):
# Get the linkage table
#Ldb = IS.get('raw_linkage_table')
Ldb = Ldb[Ldb['scaffold'].isin(scaffolds)].sort_values('mm')
got_scaffolds = set(Ldb['scaffold'].unique())
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in got_scaffolds:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
ldb = Ldb[Ldb['scaffold'] == scaffold]
for mm, ani in zip(mms, ANI_levels):
db = ldb[ldb['mm'] <= int(mm)].drop_duplicates(subset=['scaffold', 'position_A', 'position_B'], keep='last')
cov = db.set_index('position_A')[on].sort_index()
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len=False)
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db['adjusted_start'] = db['start'] + tally
db['adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
        Wdb = pd.concat(dbs)
    else:
        Wdb = pd.DataFrame()
    return Wdb
import pandas as pd
import numpy as np
import unittest
class TestSeriesSub(unittest.TestCase):
"""
Test the Pandas.Series.sub function
    sub() subtracts each element of the 'other' Series from the Series it is called on,
    aligning elements on their corresponding indices.
    This test focuses only on the 'other' parameter and the Series the method is called on.
    The remaining parameters are left at their default values.
"""
def setUp(self):
# regular subtraction
self.series1a = pd.Series([4,1,2,3])
self.series1b = pd.Series([6,9,4,8])
self.series1c = pd.Series([-2,-8,-2,-5])
# subtraction with NaN
self.series2a = pd.Series([1,np.nan,2,3])
self.series2b = pd.Series([4,np.nan,np.nan,6])
self.series2c = pd.Series([-3,np.nan,np.nan,-3])
# subtraction with large series
self.series3b = pd.Series(range(0,300000))
self.series3a = pd.Series(range(0,600000,2))
self.series3c = self.series3b
# subtraction with empty series
self.series4 = pd.Series([],dtype='int')
# subtraction with different lengths
self.series5a = pd.Series([2,4,5,6,8,9,10])
self.series5b = self.series1b
        self.series5c = pd.Series([-4,-5,1,-2,np.nan,np.nan,np.nan])
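    # Illustrative tests (a sketch) exercising Series.sub on the fixtures above;
    # each expected result follows the c = a - b pattern of the fixtures.
    def test_sub_regular(self):
        result = self.series1a.sub(self.series1b)
        pd.testing.assert_series_equal(result, self.series1c)

    def test_sub_with_nan(self):
        # NaN in either operand propagates to the result.
        result = self.series2a.sub(self.series2b)
        pd.testing.assert_series_equal(result, self.series2c)

    def test_sub_large_series(self):
        result = self.series3a.sub(self.series3b)
        pd.testing.assert_series_equal(result, self.series3c)

    def test_sub_different_lengths(self):
        # Indices present in only one Series yield NaN after alignment.
        result = self.series5a.sub(self.series5b)
        pd.testing.assert_series_equal(result, self.series5c)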
"""Train the model"""
import argparse
import logging
from tensorboardX import SummaryWriter
import os, shutil
import numpy as np
import pandas as pd
from sklearn.utils.class_weight import compute_class_weight
import torch
import torch.optim as optim
import torchvision.models as models
from torch.autograd import Variable
from tqdm import tqdm
# from torchsummary import summary
import utils
import json
import model.net as net
import model.data_loader as data_loader
from evaluate import evaluate, evaluate_predictions
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='data', help="Directory containing the dataset")
parser.add_argument('--model-dir', default='experiments', help="Directory containing params.json")
parser.add_argument('--setting-dir', default='settings', help="Directory with different settings")
parser.add_argument('--setting', default='collider-pf', help="Directory containing setting.json: experimental setting, data generation, regression model, etc.")
parser.add_argument('--fase', default='xybn', help='fase of training model, see manuscript for details. x, y, xy, bn, or feature')
parser.add_argument('--experiment', default='', help="Manual name for experiment for logging, will be subdir of setting")
parser.add_argument('--restore-file', default=None,
help="Optional, name of the file in --model_dir containing weights to reload before \
training") # 'best' or 'train'
parser.add_argument('--restore-last', action='store_true', help="continue a last run")
parser.add_argument('--restore-warm', action='store_true', help="continue on the run called 'warm-start.pth'")
parser.add_argument('--use-last', action="store_true", help="use last state dict instead of 'best' (use for early stopping manually)")
parser.add_argument('--cold-start', action='store_true', help="ignore previous state dicts (weights), even if they exist")
parser.add_argument('--warm-start', dest='cold_start', action='store_false', help="start from previous state dict")
parser.add_argument('--disable-cuda', action='store_true', help="Disable Cuda")
parser.add_argument('--no-parallel', action="store_false", help="no multiple GPU", dest="parallel")
parser.add_argument('--parallel', action="store_true", help="multiple GPU", dest="parallel")
parser.add_argument('--intercept', action="store_true", help="dummy run for getting intercept baseline results")
parser.add_argument('--visdom', action='store_true', help='generate plots with visdom')
parser.add_argument('--novisdom', dest='visdom', action='store_false', help='dont plot with visdom')
parser.add_argument('--monitor-grads', action='store_true', help='keep track of mean norm of gradients')
parser.set_defaults(parallel=False, cold_start=True, use_last=False, intercept=False, restore_last=False, save_preds=False,
monitor_grads=False, restore_warm=False, visdom=False)
def train(model, optimizer, loss_fn, dataloader, metrics, params, setting, writer=None, epoch=None,
mines=None, optims_mine=None):
"""Train the model on `num_steps` batches
Args:
model: (torch.nn.Module) the neural network
optimizer: (torch.optim) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data
metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch
params: (Params) hyperparameters
        setting: experimental setting object (controls covariate mode, etc.)
        writer: (SummaryWriter) optional tensorboardX writer for logging
        epoch: (int) index of the current epoch
        mines: (dict) optional MINE mutual-information estimator networks
        optims_mine: (dict) optimizers for the MINE estimator networks
"""
global train_tensor_keys, logdir
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
# create storate for tensors for OLS after minibatches
ts = []
Xs = []
Xtrues = []
Ys = []
Xhats = []
Yhats = []
Zhats = []
# Use tqdm for progress bar
with tqdm(total=len(dataloader)) as progress_bar:
for i, batch in enumerate(dataloader):
summary_batch = {}
# put batch on cuda
batch = {k: v.to(params.device) for k, v in batch.items()}
if not (setting.covar_mode and epoch > params.suppress_t_epochs):
batch["t"] = torch.zeros_like(batch['t'])
Xs.append(batch['x'].detach().cpu())
Xtrues.append(batch['x_true'].detach().cpu())
# compute model output and loss
output_batch = model(batch['image'], batch['t'].view(-1,1), epoch)
Yhats.append(output_batch['y'].detach().cpu())
# calculate loss
if args.fase == "feature":
                # compute the loss on z directly, to see how well z can be recovered
loss_fn_z = torch.nn.MSELoss()
loss_z = loss_fn_z(output_batch["y"].squeeze(), batch["z"])
loss = loss_z
summary_batch["loss_z"] = loss_z.item()
else:
loss_fn_y = torch.nn.MSELoss()
loss_y = loss_fn_y(output_batch["y"].squeeze(), batch["y"])
loss = loss_y
summary_batch["loss_y"] = loss_y.item()
# calculate other losses based on estimation of x
if params.use_mi:
mi_losses = {}
                # MINE mutual-information estimators: update each estimator network,
                # then use its bound as a loss term for the model
for (mi_name, mi_estimator), mi_optim in zip(mines.items(), optims_mine.values()):
if 'monitor' in mi_name:
bottleneck_name = mi_name.split("_")[1]
target_name = mi_name.split("_")[2]
else:
bottleneck_name = mi_name.split("_")[0]
target_name = mi_name.split("_")[1]
mi_bn = output_batch[bottleneck_name]
if "bn" in target_name:
mi_target = output_batch[target_name]
else:
mi_target = batch[target_name].view(-1,1)
for _ in range(params.num_mi_steps):
# update the MI estimator network for n steps
mi_loss = mi_estimator.lower_bound(mi_bn.detach(), mi_target.detach())
mi_optim.zero_grad()
mi_loss.backward(retain_graph=True)
mi_optim.step()
# after updating mi network, calculate MI for downward loss
mi_loss = mi_estimator.lower_bound(mi_bn, mi_target)
mi_losses[mi_name] = mi_loss
# store mutual information
summary_batch["mi_" + mi_name] = -1*mi_loss.item()
# calculate spearman rho
if mi_bn.shape[1] == 1:
summary_batch[mi_name + "_rho"] = net.spearmanrho(mi_target.detach().cpu(), mi_bn.detach().cpu())
# calculate loss for colllider x
if params.loss_x_type == 'mi':
loss_x = mi_losses['bnx_x']
elif params.loss_x_type == 'least-squares':
# if not using mutual information to make bottleneck layer close to x, directly predict x with the CNN
loss_fn_x = torch.nn.MSELoss()
loss_x = loss_fn_x(output_batch["bnx"].squeeze(), batch["x"])
else:
raise NotImplementedError(f'x loss not implemented: {params.loss_x_type}, should be in mi, least-squares')
summary_batch["loss_x"] = loss_x.item()
if not params.alpha == 1:
# possibly weigh down contribution of estimating x
loss_x *= params.alpha
summary_batch["loss_x_weighted"] = loss_x.item()
# add x loss to total loss
loss += loss_x
# add least squares regression on final layer
if params.do_least_squares:
X = batch["x"].view(-1,1)
t = batch["t"].view(-1,1)
Z = output_batch["bnz"]
if Z.ndimension() == 1:
Z.unsqueeze_(1)
Xhat = output_batch["bnx"]
# add intercept
Zi = torch.cat([torch.ones_like(t), Z], 1)
# add treatment info
Zt = torch.cat([Zi, t], 1)
Y = batch["y"].view(-1,1)
# regress y on final layer, without x
betas_y = net.cholesky_least_squares(Zt, Y, intercept=False)
y_hat = Zt.matmul(betas_y).view(-1,1)
mse_y = ((Y - y_hat)**2).mean()
summary_batch["regr_b_t"] = betas_y[-1].item()
summary_batch["regr_loss_y"] = mse_y.item()
# regress x on final layer without x
betas_x = net.cholesky_least_squares(Zi, Xhat, intercept=False)
x_hat = Zi.matmul(betas_x).view(-1,1)
mse_x = ((Xhat - x_hat)**2).mean()
# store all tensors for single pass after epoch
Xhats.append(Xhat.detach().cpu())
Zhats.append(Z.detach().cpu())
ts.append(t.detach().cpu())
Ys.append(Y.detach().cpu())
summary_batch["regr_loss_x"] = mse_x.item()
# add loss_bn only after n epochs
if params.bottleneck_loss and epoch > params.bn_loss_lag_epochs:
# only add to loss when bigger than margin
if params.bn_loss_type == "regressor-least-squares":
if params.bn_loss_margin_type == "dynamic-mean":
# for each batch, calculate loss of just using mean for predicting x
mse_x_mean = ((X - X.mean())**2).mean()
loss_bn = torch.max(torch.zeros_like(mse_x), mse_x_mean - mse_x)
elif params.bn_loss_margin_type == "fixed":
mse_diff = params.bn_loss_margin - mse_x
loss_bn = torch.max(torch.zeros_like(mse_x), mse_diff)
else:
raise NotImplementedError(f'bottleneck loss margin type not implemented: {params.bn_loss_margin_type}')
elif params.bn_loss_type == 'mi':
loss_bn = -1*mi_losses[params.bn_loss_mi]
#loss_bn = torch.max(torch.ones_like(loss_bn)*params.bn_loss_margin, loss_bn)
else:
raise NotImplementedError(f'currently not implemented bottleneck loss type: {params.bn_loss_type}')
# possibly reweigh bottleneck loss and add to total loss
summary_batch["loss_bn"] = loss_bn.item()
# note is this double?
if loss_bn > params.bn_loss_margin:
loss_bn *= params.bottleneck_loss_wt
loss += loss_bn
# perform parameter update
optimizer.zero_grad()
loss.backward()
optimizer.step()
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# if necessary, write out tensors
if params.monitor_train_tensors and (epoch % params.save_summary_steps == 0):
tensors = {}
for tensor_key in train_tensor_keys:
if tensor_key in batch.keys():
tensors[tensor_key] = batch[tensor_key].squeeze().numpy()
elif tensor_key.endswith("hat"):
tensor_key = tensor_key.split("_")[0]
if tensor_key in output_batch.keys():
tensors[tensor_key+"_hat"] = output_batch[tensor_key].detach().cpu().squeeze().numpy()
else:
assert False, f"key not found: {tensor_key}"
# print(tensors)
                df = pd.DataFrame.from_dict(tensors, orient='columns')
import argparse
import pandas as pd
import seaborn as sns
import json
import re
import os
import copy
import subprocess
import time
from omics_tools import differential_expression, utils, comparison_generator
from collections import Counter, defaultdict
from multiprocessing import Pool, get_context
import parallel_comparison
import matplotlib.pyplot as plt
# requests prefers simplejson
try:
import simplejson as json
from simplejson.errors import JSONDecodeError
except ImportError:
import json
from json.decoder import JSONDecodeError
#%matplotlib inline
#import seaborn as sns
#sns.set()
def qc_update(df, factors_to_keep, bool_factors, int_factors):
df = df[ (df['qc_gcorr_bool']==True) & (df['qc_nmap_bool']==True) ]
patterns_to_filter = ["qc_", "_unit", "_input_state"]
columns_to_filter = ["replicate", "sample_id", "temperature", "timepoint","dextrose"]
for col in df.columns:
if col not in factors_to_keep and (any(p in col.lower() for p in patterns_to_filter) or col.lower() in columns_to_filter):
df.drop(col, axis=1, inplace=True)
strain_column = ""
for col in bool_factors:
df[col] = df[col].astype(bool)
for col in int_factors:
df[col] = pd.to_numeric(df[col]).astype('int64')
return df
def create_additive_design(df,int_cols=['iptg','arabinose']):
#genes = set(df.index)
#genes_index = pd.DataFrame(list(range(len(genes))),columns=['Num_Index'],index=genes)
#df = df.join(genes_index)
for col in df.columns:
if col in int_cols:
df[col]=df[col].astype(int)
#df = pd.get_dummies(df,columns=['Timepoint'])
#df_test = pd.get_dummies(df,columns=['Num_Index'])
#return df_test
return df
def comparison_heatmap(cfm_input_df, exp_condition_cols, target_column, replicates=True, figure_name='comparison_heatmap'):
additional_conditions = []
if replicates == True:
additional_conditions.append('replicate')
print("exp_condition_cols: {}".format(exp_condition_cols))
start = time.perf_counter()
prior_comparisons = set()
execution_space = list()
data = defaultdict(list)
condition_groups = cfm_input_df.groupby(exp_condition_cols + additional_conditions)
print("cfm_input_df")
print(cfm_input_df.head(5))
cols_of_interest0 = cfm_input_df[exp_condition_cols + additional_conditions].drop_duplicates()
sort_keys = exp_condition_cols + additional_conditions
cols_of_interest1 = cols_of_interest0.sort_values(sort_keys)
cols_of_interest = cols_of_interest1.values
for a,condition1 in enumerate(cols_of_interest):
for b,condition2 in enumerate(cols_of_interest):
# current_comparison = frozenset(Counter((a,b)))
if (a,b) not in prior_comparisons:
execution_space.append((condition1, condition2, condition_groups, target_column))
prior_comparisons.add((a,b))
prior_comparisons.add((b,a))
num_processors=os.cpu_count()
pool = Pool(processes = num_processors)
output = pool.map(parallel_comparison.perform_matrix_calculation, execution_space)
pool.close()
for comparison_i,item in enumerate(execution_space):
condition1,condition2,_pass1,_pass2 = item
data['condition1'].append(", ".join(map(str, condition1)))
data['condition2'].append(", ".join(map(str, condition2)))
for i,variable in enumerate(condition1):
data['{}_1'.format((exp_condition_cols + additional_conditions)[i])].append(condition1[i])
for i,variable in enumerate(condition2):
data['{}_2'.format((exp_condition_cols + additional_conditions)[i])].append(condition2[i])
data['comparison'].append(output[comparison_i])
data['condition1'].append(", ".join(map(str, condition2)))
data['condition2'].append(", ".join(map(str, condition1)))
for i,variable in enumerate(condition1):
data['{}_1'.format((exp_condition_cols + additional_conditions)[i])].append(condition1[i])
for i,variable in enumerate(condition2):
data['{}_2'.format((exp_condition_cols + additional_conditions)[i])].append(condition2[i])
data['comparison'].append(output[comparison_i])
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least shallow-)copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # Labels don't matter which way they're copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # Names don't matter which way they're copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
| tm.assert_index_equal(res, exp) | pandas.util.testing.assert_index_equal |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
import seaborn as sns
def plot_comparison_mission_2():
# Load the various results, being sure to add a column to represent the method
results_df = | pd.DataFrame() | pandas.DataFrame |
"""
Seq2seq-style ACRe
"""
import numpy as np
import pandas as pd
from collections import defaultdict, Counter
from tqdm import tqdm, trange
import string
import itertools
import os
import json
import scipy.stats
import random
from argparse import Namespace
import copy
from sklearn.metrics import adjusted_mutual_info_score, mutual_info_score
import torch
from torch.utils.data import DataLoader
from torch import nn, optim
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
from data import language
from data.shapeworld import concept_to_lf, lf_to_concept, get_unique_concepts
from data.loader import load_dataloaders
from models import seq2seq
import util
class AddFusion(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, y):
return x + y
class MeanFusion(AddFusion):
def forward(self, x, y):
res = super().forward(x, y)
return res / 2.0
class MultiplyFusion(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, y):
return x * y
class MLPFusion(nn.Module):
def __init__(self, x_size, n_layers=1):
super().__init__()
self.x_size = x_size
self.n_layers = n_layers
layers = []
for i in range(n_layers):
if i == 0:
layers.append(nn.Linear(self.x_size * 2, self.x_size))
else:
layers.append(nn.ReLU())
                layers.append(nn.Linear(self.x_size, self.x_size))
self.mlp = nn.Sequential(*layers)
def forward(self, x, y):
xy = torch.cat([x, y], 1)
return self.mlp(xy)
FUSION_MODULES = {
"add": AddFusion,
"average": MeanFusion,
"multiply": MultiplyFusion,
"mlp": MLPFusion,
}
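# Illustrative usage sketch only (not called anywhere in this module; the
# hidden size of 64 and batch size of 8 are arbitrary assumptions): every
# fusion module maps two (batch, hidden_size) encodings to one fused tensor
# of the same shape.
def _fusion_usage_sketch(hidden_size=64, batch_size=8):
    # "add", "average" and "multiply" are parameter-free; "mlp" learns the fusion.
    fuse = FUSION_MODULES["mlp"](hidden_size)
    x1_enc = torch.randn(batch_size, hidden_size)
    x2_enc = torch.randn(batch_size, hidden_size)
    return fuse(x1_enc, x2_enc)  # shape: (batch_size, hidden_size)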
def get_transformer_encoder_decoder(
vocab_size,
embedding_size,
hidden_size,
intermediate_size,
num_hidden_layers=2,
num_attention_heads=2,
max_position_embeddings=60,
hidden_act="relu",
):
from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
config = BertConfig(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
)
config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)
model = EncoderDecoderModel(config=config)
model.config.decoder.is_decoder = True
model.config.decoder.add_cross_attention = True
return model
def get_length_from_output(out):
"""
    These slower explicit loops are used instead of (a != language.PAD_IDX).sum(1)
    because sampling can emit PAD tokens before the EOS, which would make a
    mask-based length count wrong.
"""
batch_size = out.shape[0]
length = torch.zeros((batch_size,), dtype=torch.int64, device=out.device)
for i in range(batch_size):
first_eos = (out[i] == language.EOS_IDX).nonzero()[0]
length[i] = first_eos + 1
return length
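# Worked example of the failure mode described above (PAD_IDX == 0 and
# EOS_IDX == 2 are assumed values, not read from data.language): for a sampled
# row [5, 0, 4, 2, 0], (row != PAD_IDX).sum() gives 3, but the true length up
# to and including the first EOS is 4, which is what the loop above returns.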
def get_mask_from_length(length):
"""
Slow way to get mask from length
"""
batch_size = length.shape[0]
max_len = length.max()
mask = torch.zeros((batch_size, max_len), dtype=torch.uint8, device=length.device)
for i in range(batch_size):
this_len = length[i]
mask[i, :this_len] = 1
return mask
# Transformer-based encoder-decoder model. It accepts a varying number of
# operand sequences, which are concatenated into a single encoder input
# separated by a special SEP token (see concatenate_input below).
class OpT(nn.Module):
def __init__(self, vocab_size, embedding_size, hidden_size):
super().__init__()
# + 1 due to special sep token.
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.sep_idx = vocab_size
self.seq2seq = get_transformer_encoder_decoder(
vocab_size + 1,
embedding_size,
hidden_size,
hidden_size,
)
def forward(self, inp, y, y_len):
# Concatenate x1/x2
if inp:
inp_seq, inp_len = self.concatenate_input(inp)
else:
# Just pass in single tokens for encoder and decoder
batch_size = y.shape[0]
inp_seq = torch.ones((batch_size, 1), dtype=y.dtype, device=y.device)
inp_len = torch.ones((batch_size,), dtype=y.dtype, device=y.device)
inp_mask = get_mask_from_length(inp_len)
y_mask = get_mask_from_length(y_len)
output = self.seq2seq(
input_ids=inp_seq,
attention_mask=inp_mask,
decoder_input_ids=y,
decoder_attention_mask=y_mask,
labels=y.clone(),
)
decode_len = (y_len - 1).cpu()
logits = pack_padded_sequence(
output.logits.cuda(),
decode_len,
enforce_sorted=False,
batch_first=True,
).data
targets = pack_padded_sequence(
y[:, 1:], decode_len, enforce_sorted=False, batch_first=True
).data
acc = (logits.argmax(1) == targets).float().mean().item()
return {
"loss": output.loss,
"acc": acc,
"n": logits.shape[0],
}
def sample(self, inp, greedy=False, **kwargs):
if isinstance(inp, int):
inp_seq = torch.ones((1, 1), dtype=torch.int64, device=self.seq2seq.device)
inp_len = torch.ones((1,), dtype=torch.int64, device=self.seq2seq.device)
else:
inp_seq, inp_len = self.concatenate_input(inp)
inp_mask = get_mask_from_length(inp_len)
out_seq = self.seq2seq.generate(
input_ids=inp_seq,
attention_mask=inp_mask,
decoder_start_token_id=language.SOS_IDX,
pad_token_id=language.PAD_IDX,
eos_token_id=language.EOS_IDX,
do_sample=not greedy,
**kwargs,
)
# Assign last value eos, if it wasn't sampled
out_seq[:, -1] = language.EOS_IDX
# Compute length
out_len = get_length_from_output(out_seq)
scores = None
return out_seq, out_len, scores
def concatenate_input(self, inp):
batch_size = inp[0].shape[0]
# Remove sos/eos, then add at end
total_max_len = sum(seq.shape[1] - 2 for seq in inp[::2]) + 2
# Now add a single sep token for each of the arguments
n_sep = max(0, len(inp[::2]) - 1)
total_max_len += n_sep
total_inp = torch.full(
(batch_size, total_max_len),
language.PAD_IDX,
device=inp[0].device,
dtype=inp[0].dtype,
)
total_len = torch.full(
(batch_size,),
language.PAD_IDX,
device=inp[1].device,
dtype=inp[1].dtype,
)
for i in range(batch_size):
# For each ith item in the batch, concatenate inputs along all rows
# Set start of sentence
total_inp[i, 0] = language.SOS_IDX
j = 1
# Track length including sep tokens
i_len = 0
for inp_i in range(0, len(inp), 2):
# For length, ignore sos/eos
this_inp_seq = inp[inp_i][i]
this_inp_len = inp[inp_i + 1][i] - 2
# Ignore sos/eos when copying over sentence
total_inp[i, j : j + this_inp_len] = this_inp_seq[1 : this_inp_len + 1]
j += this_inp_len
i_len += this_inp_len
# Add sep token
total_inp[i, j] = self.sep_idx
j += 1
i_len += 1
# Remove the last sep token
j -= 1
i_len -= 1
total_inp[i, j] = language.EOS_IDX
total_len[i] = i_len + 2
# Trim
total_inp = total_inp[:, : total_len.max()]
return total_inp, total_len
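# Sketch of the concatenation convention implemented above (token names are
# placeholders): operand sequences [SOS, a, b, EOS] and [SOS, c, EOS] are
# stripped of their SOS/EOS markers and joined into the single encoder input
# [SOS, a, b, SEP, c, EOS], where SEP is the extra index `vocab_size`
# reserved in __init__.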
class BinOp(nn.Module):
def __init__(self, vocab_size, embedding_size, hidden_size, fusion="multiply"):
super().__init__()
if fusion not in FUSION_MODULES:
raise NotImplementedError(f"fusion = {fusion}")
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.fusion = FUSION_MODULES[fusion](hidden_size)
self.encoder = seq2seq.Encoder(vocab_size, embedding_size, hidden_size)
self.decoder = seq2seq.Decoder(
vocab_size, hidden_size, embedding_size, hidden_size
)
def forward(self, inp, y, y_len):
"""
fuse(enc(x1), enc(x2)) -> y
"""
x1, x1_len, x2, x2_len = inp
x1_enc = self.encoder(x1, x1_len)
x2_enc = self.encoder(x2, x2_len)
x_enc = self.fusion(x1_enc, x2_enc)
return self.decoder(x_enc, y, y_len)
def sample(self, inp, **kwargs):
x1, x1_len, x2, x2_len = inp
x1_enc = self.encoder(x1, x1_len)
x2_enc = self.encoder(x2, x2_len)
x_enc = self.fusion(x1_enc, x2_enc)
return self.decoder.sample(x_enc, **kwargs)
class UnOp(nn.Module):
def __init__(self, vocab_size, embedding_size, hidden_size):
super().__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.encoder = seq2seq.Encoder(vocab_size, embedding_size, hidden_size)
self.decoder = seq2seq.Decoder(
vocab_size, hidden_size, embedding_size, hidden_size
)
def forward(self, inp, y, y_len):
"""
enc(x) -> y
"""
x, x_len = inp
x_enc = self.encoder(x, x_len)
return self.decoder(x_enc, y, y_len)
def sample(self, inp, **kwargs):
x, x_len = inp
x_enc = self.encoder(x, x_len)
return self.decoder.sample(x_enc, **kwargs)
class Primitive(nn.Module):
def __init__(self, vocab_size, embedding_size, hidden_size):
super().__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.decoder = seq2seq.Decoder(
vocab_size, hidden_size, embedding_size, hidden_size
)
def forward(self, inp, y, y_len):
"""
zeros -> y (ignore input)
"""
z = torch.zeros(
(y.shape[0], self.hidden_size),
dtype=torch.float32,
device=y.device,
)
return self.decoder(z, y, y_len)
def sample(self, n, **kwargs):
"""
        NOTE: this function has a different signature from the other ops: it
        takes the number of samples `n` directly, since a Primitive has no
        input sequences to condition on.
"""
z = torch.zeros(
(n, self.hidden_size),
dtype=torch.float32,
device=self.decoder.fc.weight.device,
)
return self.decoder.sample(z, **kwargs)
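# Example call (the batch size of 32 is an arbitrary assumption): a Primitive
# is sampled from by batch size alone, e.g.
#   lang, lang_len, scores = primitive_model.sample(32, greedy=True)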
class OpDataset:
def __init__(
self,
optype,
data,
models,
vocab_size,
greedy_input=False,
length=1000,
ignore_missing=True,
sample=True,
):
self.optype = optype
self.input = data["in"]
self.data_size = len(self.input)
self.models = models
self.length = length
self.greedy_input = greedy_input
self.ignore_missing = ignore_missing
self.do_sampling = sample
# Output preprocessing
# Note OUT already has sos/eos
self.output_seq, self.output_len = self.to_idx(data["out"])
def to_idx(self, langs):
        lang_len = np.array([len(t) for t in langs], dtype=np.int64)
        lang_idx = np.full((len(langs), max(lang_len)), language.PAD_IDX, dtype=np.int64)
for i, toks in enumerate(langs):
for j, tok in enumerate(toks):
lang_idx[i, j] = int(tok)
return lang_idx, lang_len
def __len__(self):
if self.do_sampling:
return self.length
else:
return len(self.input)
def create_input(self, inp):
if not inp:
return []
seqs = []
for x in inp:
arity = len(x) - 1
if arity == 0:
# Primitive
try:
primitive_model = self.models[0][x[0]]
except KeyError:
# We have no trained model for this input
if self.ignore_missing:
return None
else:
raise RuntimeError(f"No model for {x[0]}")
*sample, _ = primitive_model.sample(1, greedy=self.greedy_input)
elif arity == 1:
op, arg = x
if len(arg) > 1:
raise RuntimeError(f"Found concept with non-primitive arg: {inp}")
# UnOp. First sample primitive, then apply the transformation
try:
primitive_model = self.models[0][arg[0]]
except KeyError:
# We have no trained model for this input
if self.ignore_missing:
return None
else:
raise RuntimeError(f"No model for {arg[0]}")
*primitive_sample, _ = primitive_model.sample(
1, greedy=self.greedy_input
)
try:
op_model = self.models[1][op]
except KeyError:
if self.ignore_missing:
return None
else:
raise RuntimeError(f"No model for {op}")
*sample, _ = op_model.sample(primitive_sample, greedy=self.greedy_input)
else:
raise NotImplementedError(f"arity {len(x)}")
seqs.extend(sample)
# All seqs have batch size 1 - squeeze
seqs = [s.squeeze(0) for s in seqs]
return seqs
def sample(self, i=None):
if i is None:
i = np.random.choice(self.data_size)
out_seq = torch.tensor(self.output_seq[i])
out_len = self.output_len[i]
# Construct the input sequence from the concept
concept = self.input[i]
# FIXME - this is inefficient
concept_str = lf_to_concept((self.optype,) + concept)
inp = self.create_input(concept)
if inp is None:
return self.sample() # Try again
return (concept_str,) + tuple(inp) + (out_seq, out_len)
def __getitem__(self, i):
if self.do_sampling:
return self.sample()
else:
return self.sample(i)
def collect_data(lang, concepts, concepts_split):
def data_holder():
return {"in": [], "out": []}
data = {
"all": defaultdict(lambda: defaultdict(data_holder)),
"train": defaultdict(lambda: defaultdict(data_holder)),
"test": defaultdict(lambda: defaultdict(data_holder)),
}
for l, c in zip(lang, concepts):
arity = len(c) - 1
# AND, OR, NOT, or a basic feature
optype = c[0]
# Input (empty for basic features)
inp = tuple(c[1:])
# Add to all
data["all"][arity][optype]["in"].append(inp)
data["all"][arity][optype]["out"].append(l)
# Add to split
if c in concepts_split["train"]:
split = "train"
elif c in concepts_split["test"]:
split = "test"
else:
raise RuntimeError(f"Can't find concept {c}")
data[split][arity][optype]["in"].append(inp)
data[split][arity][optype]["out"].append(l)
return data
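# Shape of the returned structure (the concrete operator and argument names
# below are illustrative): data[split][arity][optype] holds two parallel lists,
# e.g. data["train"][2]["and"]["in"] is a list of argument tuples and
# data["train"][2]["and"]["out"] the corresponding utterances.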
def pad_collate_varying(batch):
batch_flat = list(zip(*batch))
concepts, *batch_flat = batch_flat
batch_processed = [concepts]
for i in range(0, len(batch_flat), 2):
batch_seq = batch_flat[i]
batch_len = batch_flat[i + 1]
batch_len = torch.tensor(batch_len)
batch_pad = pad_sequence(
batch_seq, padding_value=language.PAD_IDX, batch_first=True
)
batch_processed.extend((batch_pad, batch_len))
return batch_processed
def train_val_split(opdata, val_pct=0.1, by_concept=False):
if by_concept:
concepts = list(set(opdata["in"]))
# Split by concept
csize = len(concepts)
cindices = np.random.permutation(csize)
val_amt = max(1, int(val_pct * csize))
val_cidx, train_cidx = cindices[:val_amt], cindices[val_amt:]
# Now retrieve concepts
train_concepts = set([c for i, c in enumerate(concepts) if i in train_cidx])
val_concepts = set([c for i, c in enumerate(concepts) if i in val_cidx])
train_idx = [i for i, c in enumerate(opdata["in"]) if c in train_concepts]
val_idx = [i for i, c in enumerate(opdata["in"]) if c in val_concepts]
assert len(train_idx) + len(val_idx) == len(opdata["in"])
assert set(train_idx + val_idx) == set(range(len(opdata["in"])))
else:
# Just split generically
dsize = len(opdata["in"])
assert dsize == len(opdata["out"])
indices = np.random.permutation(dsize)
val_amt = int(val_pct * dsize)
val_idx, train_idx = indices[:val_amt], indices[val_amt:]
val_idx = set(val_idx)
train_idx = set(train_idx)
train_opdata = {
"in": [x for i, x in enumerate(opdata["in"]) if i in train_idx],
"out": [x for i, x in enumerate(opdata["out"]) if i in train_idx],
}
val_opdata = {
"in": [x for i, x in enumerate(opdata["in"]) if i in val_idx],
"out": [x for i, x in enumerate(opdata["out"]) if i in val_idx],
}
return train_opdata, val_opdata
def train_model(model, models, optype, opdata, vocab_size, args):
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss(reduction="none")
if args.include_not:
# split by concept if there is more than 1 concept
by_concept = len(set(opdata["in"])) > 1
else:
# split by concept only if and/or (i.e. binary)
by_concept = optype in {"and", "or"}
train_opdata, val_opdata = train_val_split(
opdata, val_pct=0.1, by_concept=by_concept
)
train_dataset = OpDataset(optype, train_opdata, models, vocab_size)
val_dataset = OpDataset(
optype, val_opdata, models, vocab_size, sample=False, greedy_input=True
)
dataloaders = {
"train": DataLoader(
train_dataset,
num_workers=0,
collate_fn=pad_collate_varying,
batch_size=args.batch_size,
shuffle=True,
),
"val": DataLoader(
val_dataset,
num_workers=0,
collate_fn=pad_collate_varying,
batch_size=args.batch_size,
shuffle=False,
),
}
def run_for_one_epoch(split, epoch):
training = split == "train"
dataloader = dataloaders[split]
torch.set_grad_enabled(training)
model.train(mode=training)
stats = util.Statistics()
for batch_i, batch in enumerate(dataloader):
concepts, *batch = batch
if args.cuda:
batch = [x.cuda() for x in batch]
*inp, out_seq, out_len = batch
# Preds are from 0 to n-1
if args.model_type == "transformer":
output = model(inp, out_seq, out_len)
loss = output["loss"]
acc = output["acc"]
output_batch_size = output["n"]
else:
scores, targets = model(inp, out_seq, out_len)
losses = criterion(scores, targets)
loss = losses.mean()
accs = (scores.argmax(1) == targets).float()
acc = accs.mean()
output_batch_size = scores.shape[0]
mbc = compute_metrics_by_concept(
concepts,
loss=losses.detach().cpu().numpy(),
acc=accs.detach().cpu().numpy(),
)
for concept, concept_metrics in mbc.items():
for metric, cms in concept_metrics.items():
n_cm = len(cms)
cm_mean = np.mean(cms)
stats.update(
**{f"{concept}_{metric}": cm_mean}, batch_size=n_cm
)
if not training:
# TODO - sample and measure top1 accuracy?
pass
stats.update(
**{f"{optype}_loss": loss, f"{optype}_acc": acc},
batch_size=output_batch_size,
)
if training and not args.no_train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
return stats.averages()
pbar = tqdm(total=args.epochs)
best_loss_key = f"best_{optype}_loss"
best_epoch_key = f"best_{optype}_epoch"
loss_key = f"{optype}_loss"
acc_key = f"{optype}_acc"
metrics = {
best_loss_key: np.inf,
best_epoch_key: 0,
}
best_model_state = None
for epoch in range(args.epochs):
train_metrics = run_for_one_epoch("train", epoch)
util.update_with_prefix(metrics, train_metrics, "train")
val_metrics = run_for_one_epoch("val", epoch)
util.update_with_prefix(metrics, val_metrics, "val")
if val_metrics[loss_key] < metrics[best_loss_key]:
metrics[best_loss_key] = val_metrics[loss_key]
metrics[best_epoch_key] = epoch
best_model_state = copy.deepcopy(model.state_dict())
pbar.update(1)
pbar.set_description(
f"{epoch} {optype} train loss {train_metrics[loss_key]:3f} train top1 {train_metrics[acc_key]:3f} val loss {val_metrics[loss_key]:3f} val top1 {val_metrics[acc_key]:3f} best loss {metrics[best_loss_key]:3f} @ {metrics[best_epoch_key]}"
)
if best_model_state is not None:
model.load_state_dict(best_model_state)
return metrics
def compute_metrics_by_concept(concepts, **kwargs):
metrics_by_concept = defaultdict(lambda: defaultdict(list))
for i, c in enumerate(concepts):
for metric, vals in kwargs.items():
metrics_by_concept[c][metric].append(vals[i])
return metrics_by_concept
def sample(
split,
models,
data,
vocab_size,
exp_args,
args,
metrics=None,
epochs=1,
greedy_input=False,
greedy=False,
):
criterion = nn.CrossEntropyLoss(reduction="none")
if metrics is None:
metrics = {}
def sample_from_model(m, optype, opdata):
dataset = OpDataset(
optype, opdata, models, vocab_size, greedy_input=greedy_input
)
dataloader = DataLoader(
dataset,
num_workers=0,
collate_fn=pad_collate_varying,
batch_size=args.batch_size,
)
stats = util.Statistics()
samples = []
for _ in range(epochs):
for batch_i, batch in enumerate(dataloader):
concepts, *batch = batch
if args.cuda:
batch = [x.cuda() for x in batch]
*inp, out_seq, out_len = batch
with torch.no_grad():
if args.model_type == "transformer":
output = m(inp, out_seq, out_len)
loss = output["loss"]
acc = output["acc"]
output_batch_size = output["n"]
else:
scores, targets = m(inp, out_seq, out_len)
losses = criterion(scores, targets)
loss = losses.mean()
output_batch_size = scores.shape[0]
accs = (scores.argmax(1) == targets).float()
acc = accs.mean()
mbc = compute_metrics_by_concept(
concepts,
loss=losses.detach().cpu().numpy(),
acc=accs.detach().cpu().numpy(),
)
for concept, concept_metrics in mbc.items():
for metric, cms in concept_metrics.items():
n_cm = len(cms)
cm_mean = np.mean(cms)
stats.update(
**{f"{concept}_{metric}": cm_mean}, batch_size=n_cm
)
if len(inp) == 0:
inp = out_seq.shape[0] # Specify a number of samples
lang, lang_len, _ = m.sample(
inp, greedy=greedy, max_length=exp_args.max_lang_length
)
samples.extend(
zip(
lang.cpu(),
lang_len.cpu(),
out_seq.cpu(),
out_len.cpu(),
concepts,
)
)
stats.update(loss=loss, acc=acc, batch_size=output_batch_size)
# Coalesce
sample_names = [
"pred_lang",
"pred_lang_len",
"model_lang",
"model_lang_len",
"gt_lang",
]
samples_arrs = list(zip(*samples))
samples = {name: samples_arrs[i] for i, name in enumerate(sample_names)}
return stats.averages(), samples
samples = defaultdict(list)
pbar = tqdm(total=sum(len(x) for arity, x in data.items() if arity in data))
for arity in (0, 1, 2):
if arity not in data:
continue
adata = data[arity]
for optype, opdata in adata.items():
pbar.set_description(f"Sample: {optype}")
# Get the model
try:
m = models[arity][optype]
except KeyError:
raise RuntimeError(f"No model for {optype}")
m.eval()
if args.cuda:
m.cuda()
op_metrics, op_samples = sample_from_model(m, optype, opdata)
util.update_with_prefix(metrics, op_metrics, split)
# Save the model
for name, vals in op_samples.items():
samples[name].extend(vals)
pbar.update(1)
pbar.close()
return metrics, dict(samples)
def get_model(arity, vocab_size, args):
if args.model_type == "rnn":
if arity == 0:
m = Primitive(vocab_size, args.embedding_size, args.hidden_size)
elif arity == 1:
m = UnOp(vocab_size, args.embedding_size, args.hidden_size)
elif arity == 2:
m = BinOp(
vocab_size,
args.embedding_size,
args.hidden_size,
fusion="multiply",
)
else:
# Generic transformer model
m = OpT(vocab_size, args.embedding_size, args.hidden_size)
return m
def train(data, vocab_size, args):
models = defaultdict(dict)
metrics = {}
pbar = tqdm(total=sum(len(x) for x in data.values()))
for arity in (0, 1, 2):
adata = data[arity]
for optype, opdata in adata.items():
pbar.set_description(f"Train: {optype}")
m = get_model(arity, vocab_size, args)
if args.cuda:
m = m.cuda()
op_metrics = train_model(m, models, optype, opdata, vocab_size, args)
util.update_with_prefix(metrics, op_metrics, "train")
# Save the model
models[arity][optype] = m
pbar.update(1)
pbar.close()
return models, metrics
TOK_NAMES = string.ascii_lowercase + "".join(map(str, range(10)))
def anonymize(out):
out_anon = []
for tok in out:
i = int(tok)
try:
tok_anon = TOK_NAMES[i]
except IndexError:
tok_anon = str(i + 10)
out_anon.append(tok_anon)
return out_anon
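# Illustrative mapping (comment only): indices 0-25 become letters, 26-35 become the
# digits '0'-'9', and anything beyond the end of TOK_NAMES falls back to str(i + 10),
# e.g. anonymize(['0', '25', '26', '36']) -> ['a', 'z', '0', '46']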
def flatten(nested):
return tuple(_flatten(nested))
def _flatten(nested):
if isinstance(nested, str):
return [
nested,
]
else:
flattened = []
for item in nested:
flattened.extend(_flatten(item))
return flattened
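# Illustrative flattening of a nested concept (comment only):
#   flatten(('and', ('not', 'red'), 'blue')) -> ('and', 'not', 'red', 'blue')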
def flatten_opdata(opdata):
concepts = []
messages = []
for concept, cdata in opdata.items():
for m in cdata:
concepts.append(concept)
messages.append(m)
return concepts, messages
def get_opname(concept_flat):
if concept_flat[0] == "and":
return "AND"
elif concept_flat[0] == "or":
return "OR"
elif concept_flat[0] == "not":
return "NOT"
else:
return "prim"
def get_data_stats(data, unique_concepts):
records = []
entropies = []
concepts = []
messages = []
for arity, adata in data.items():
for optype, opdata in adata.items():
# Sort language further by concept (i.e. op + args)
opdata_by_concept = defaultdict(list)
for inp, out in zip(opdata["in"], opdata["out"]):
out = " ".join(anonymize(out))
opdata_by_concept[(optype,) + inp].append(out)
# Flatten + extend
a_cs, a_ms = flatten_opdata(opdata_by_concept)
concepts.extend(a_cs)
messages.extend(a_ms)
# For each concept, get distribution of utterances
for concept, cdata in opdata_by_concept.items():
counts = Counter(cdata)
counts_total = sum(counts.values())
counts_norm = {k: v / counts_total for k, v in counts.items()}
entropy = scipy.stats.entropy(list(counts.values()))
entropies.append(entropy)
if concept in unique_concepts["train"]:
seen = "seen"
elif concept in unique_concepts["test"]:
seen = "unseen"
else:
raise RuntimeError(f"Can't find concept {concept}")
concept_flat = flatten(concept)
concept_str = " ".join(concept_flat)
ctype = get_opname(concept_flat)
for lang, count in counts.items():
percent = counts_norm[lang]
concept_flat = " ".join(flatten(concept))
records.append(
{
"arity": arity,
"type": ctype,
"concept": concept_str,
"lang": lang,
"count": count,
"percent": percent,
"entropy": entropy,
"seen": seen,
}
)
concept_df = pd.DataFrame(records)
# Overall stats - (1) MI; (2) conditional entropy
concepts = [" ".join(flatten(c)) for c in concepts]
mi = mutual_info_score(concepts, messages)
ami = adjusted_mutual_info_score(concepts, messages)
overall_stats = {
"entropy": np.mean(entropies),
"mi": mi,
"ami": ami,
}
return concept_df, overall_stats
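# Rough intuition for the summary statistics above (comment only): the per-concept
# entropy is low when one utterance dominates a concept, e.g. scipy.stats.entropy([9, 1])
# is about 0.33 nats versus about 0.69 for entropy([5, 5]); mutual_info_score is high
# when a message reliably identifies its concept, and the adjusted variant corrects
# that score for chance agreement.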
def metrics_to_df(metrics):
records = []
for mname, value in metrics.items():
split, *optional_concept, metric = mname.split("_")
if optional_concept:
concept = optional_concept[0]
try:
concept_lf = concept_to_lf(concept)
except AssertionError:
continue
arity = len(concept_lf) - 1
concept_flat = flatten(concept_lf)
concept = " ".join(concept_flat)
op = get_opname(concept_flat)
else:
concept = "overall"
arity = float("nan")
op = "overall"
records.append(
{
"split": split,
"concept": concept,
"metric": metric,
"value": value,
"arity": arity,
"op": op,
}
)
return | pd.DataFrame(records) | pandas.DataFrame |
# -*- coding:utf-8 -*-
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rc('font',family='Times New Roman')
plt.rc('font',size=9.5)
import random
import os
cwd = r'..\large_scale_synchronization_r4'
import csv
def main(nb_DC,nb_warehouse,nb_pick_up_station,square_length):
with open(os.path.join(cwd,'node.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['node_name', 'node_id','x_coord', 'y_coord','FT'])
node_id=1
print('distribution center')
count = 0
x_dc = np.zeros(nb_DC)
y_dc = np.zeros(nb_DC)
while count < nb_DC:
x_dc[count] = np.random.randint(-square_length*0.9,square_length*0.9)
y_dc[count] = np.random.randint(-square_length*0.9,square_length*0.9)
print(x_dc[count], y_dc[count])
line=['DC'+str(count+1),node_id,x_dc[count],y_dc[count],'distribution_center' ]
writer.writerow(line)
node_id+=1
count += 1
print('random generate '+str(count)+' distribution centers...')
transshipment_cost=np.zeros([nb_DC*nb_rec,nb_DC*nb_del])
for k in range(nb_rec):
for l in range(nb_del):
for i in range(nb_DC):
for j in range(nb_DC):
                        # Manhattan distance
transshipment_cost[k*nb_DC+i][l*nb_DC+j]=abs(y_dc[i]-y_dc[j])+abs(x_dc[i]-x_dc[j])
# convert array into dataframe
DF = pd.DataFrame(transshipment_cost)
# save the dataframe as a csv file
DF.to_csv(os.path.join(cwd,"transshipment_time.csv"),index=False,header=False)
print('warehouse')
x_w = np.zeros(nb_warehouse)
y_w = np.zeros(nb_warehouse)
count = 0
while count < nb_warehouse:
x_w[count] = np.random.randint(-square_length,square_length)
y_w[count] = np.random.randint(-square_length,square_length)
print(x_w[count], y_w[count])
line=['WH'+str(count+1),node_id,x_w[count],y_w[count],'warehouse']
writer.writerow(line)
node_id+=1
count += 1
print('random generate '+str(count)+' warehouses...')
travel_time_1=np.zeros([nb_DC*nb_rec,nb_DC*nb_rec])
for k in range(nb_rec):
for i in range(nb_warehouse):
for j in range(nb_DC):
                    # Manhattan distance
travel_time_1[i][k*nb_DC+j]=(abs(y_w[i]-y_dc[j])+abs(x_w[i]-x_dc[j]))/2
# convert array into dataframe
DF = pd.DataFrame(travel_time_1)
# save the dataframe as a csv file
DF.to_csv(os.path.join(cwd,"travel_time_1.csv"),index=False,header=False)
print('pick-up station')
x_s = np.zeros(nb_pick_up_station)
y_s = np.zeros(nb_pick_up_station)
count = 0
while count < nb_pick_up_station:
x_s[count] = np.random.randint(-square_length,square_length)
y_s[count] = np.random.randint(-square_length,square_length)
print(x_s[count], y_s[count])
line=['PS'+str(count+1),node_id,x_s[count],y_s[count],'pick-up_station']
writer.writerow(line)
node_id+=1
count += 1
print('random generate '+str(count)+' pick up stations...')
travel_time_2=np.zeros([nb_DC*nb_del,nb_DC*nb_del])
for k in range (nb_del):
for i in range(nb_pick_up_station):
for j in range(nb_DC):
travel_time_2[nb_DC*k+j][i]=(abs(y_s[i]-y_dc[j])+abs(x_s[i]-x_dc[j]))/2
# convert array into dataframe
DF = pd.DataFrame(travel_time_2)
# save the dataframe as a csv file
DF.to_csv(os.path.join(cwd,"travel_time_2.csv"),index=False,header=False)
plt.figure(figsize=(8,8.1),dpi=125)
plt.plot(x_dc,y_dc,'o',label='Distribution centers',markersize=8,c='k')
plt.plot(x_w,y_w,'D', label ='Warehouses',markersize=5,c='b')
plt.plot(x_s,y_s,'D', label='Pick-up stations',markersize=5,c='r')
plt.xlim((-square_length-3,square_length+3))
plt.ylim((-square_length-3,square_length+3))
my_x_ticks = np.arange(-square_length-3,square_length+3, 1)
my_y_ticks = np.arange(-square_length-3,square_length+3, 1)
plt.xticks(my_x_ticks)
plt.yticks(my_y_ticks)
plt.legend(loc='best')
plt.grid(True)
plt.title('Random Scatter')
plt.savefig(os.path.join(cwd,'imag.png'))
plt.show()
return travel_time_1,travel_time_2,transshipment_cost
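# Illustrative call (comment only): writes node.csv, the travel-time CSVs and imag.png
# under `cwd`; it assumes the module-level nb_rec/nb_del globals are already set, as
# done in the __main__ block below.
#   t1, t2, tc = main(nb_DC=3, nb_warehouse=4, nb_pick_up_station=5, square_length=6)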
if __name__=='__main__':
receive_wave=[11,16]
order_time=[6,9,10,14,17,6,9,11,14,17,7,10,12,15,8,16]
delivery_wave=[6,14]
pick_up_time=[8,9.5,16,18,7.5,10,17,19,8,9.5,17,19,20,21,17.5,13,8,9,11,15]
period_time=24
timewindow=24
global nb_rec
global nb_del
nb_rec=len(receive_wave)
nb_del=len(delivery_wave)
nb_DC=12
nb_warehouse=len(order_time)
nb_station=len(pick_up_time)
city_radius=6
fix_transport_cost=2 # dollar per hour
fix_inventory_cost=0.1 # dollar per hour
var_transport_rate=1 # dollar per hour
var_inventory_cost=0.1 # dollar per hour
data_flow = np.loadtxt(os.path.join(cwd,'t_flow_matrix.txt'))
print('first-stage timetable...')
timetable_1=np.zeros((2,nb_DC*nb_rec))
for i in range(2):
timetable_1[0][i*nb_DC:i*nb_DC+nb_DC]=receive_wave[i]
timetable_1[1,:]=-1
for w in range(nb_warehouse):
timetable_1[1,w]=np.random.randint(0,24)
timetable_1[1,w]=order_time[w]
timetable_o=timetable_1
# convert array into dataframe
DF = pd.DataFrame(timetable_1)
# save the dataframe as a csv file
DF.to_csv(os.path.join(cwd,"timetable_1.csv"),index=False,header=False)
print('second-stage timetable...')
timetable_2=np.zeros((2,nb_DC*nb_del))
for i in range(2):
        timetable_2[1][i*nb_DC:i*nb_DC+nb_DC]=delivery_wave[i]
timetable_2[0,:]=-1
for s in range(nb_station):
timetable_2[0,s]=np.random.randint(0,24)
timetable_2[0,s]=pick_up_time[s]
timetable_d=timetable_2
# convert array into dataframe
DF = pd.DataFrame(timetable_2)
# save the dataframe as a csv file
DF.to_csv(os.path.join(cwd,"timetable_2.csv"),index=False,header=False)
travel_time_o,travel_time_d,transshipment_time=main(nb_DC,nb_warehouse,nb_station,city_radius)
q,n= timetable_o.shape
q,m= timetable_d.shape
data_built_1=np.zeros([n,n])
data_built_2=np.zeros([m,m])
data_dis=np.zeros([n,m])
for i in range(n):
if timetable_o[1][i] !=-1:
for k in range(n):
tmp=np.mod(timetable_o[1][i]+travel_time_o[i][k],period_time)
nb_of_period=abs(np.floor((timetable_o[1][i]+travel_time_o[i][k])/period_time))
if timetable_o[0][k]<tmp:
data_built_1[i][k]=period_time-timetable_o[1][i]+timetable_o[0][k]+nb_of_period*period_time
if timetable_o[0][k]>=tmp:
data_built_1[i][k]=timetable_o[0][k]-timetable_o[1][i]+nb_of_period*period_time
elif timetable_o[1][i] ==-1:
for k in range(n):
data_built_1[i][k]=0
for j in range(m):
if timetable_d[0][j] !=-1:
for l in range(m):
tmp=np.mod(timetable_d[0][j]-travel_time_d[l][j],period_time)
nb_of_period=abs(np.floor((timetable_o[1][i]+travel_time_o[i][k])/period_time))
if timetable_d[1][l]>tmp:
data_built_2[l][j]=period_time-timetable_d[1][l]+timetable_d[0][j]
if timetable_d[1][l]<=tmp:
data_built_2[l][j]=timetable_d[0][j]-timetable_d[1][l]
elif timetable_d[0][j] ==-1:
for l in range(m):
data_built_2[l][j]=0
for i in range(n):
for k in range(n):
if (travel_time_o[i][k]+timewindow<data_built_1[i][k]):
if travel_time_o[i][k] !=0:
data_built_1[i][k]=100000000
for l in range(m):
for j in range(m):
if (travel_time_d[l][j]+timewindow<data_built_2[l][j]):
if travel_time_d[l][j] !=0:
data_built_2[l][j]=100000000
data_built_1_A=(data_built_1-travel_time_o)*fix_inventory_cost+travel_time_o*fix_transport_cost
data_built_2_A=(data_built_2-travel_time_d)*fix_inventory_cost+travel_time_d*fix_transport_cost
for k in range(n):
for l in range(m):
tmp=np.mod(timetable_o[0][k]+transshipment_time[k][l],period_time)
            nb_of_period=abs(np.floor((timetable_o[0][k]+transshipment_time[k][l])/period_time))
if tmp<timetable_d[1][l]:
data_dis[k][l] = (timetable_d[1][l]-timetable_o[0][k])+nb_of_period*period_time
if tmp>=timetable_d[1][l]:
data_dis[k][l]=(period_time-timetable_o[0][k]+timetable_d[1][l])+nb_of_period*period_time
data_dis=(data_dis-transshipment_time)*var_inventory_cost+transshipment_time*var_transport_rate
built1_df=pd.DataFrame(data_built_1)
built2_df=pd.DataFrame(data_built_2)
flow_df= | pd.DataFrame(data_flow) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 18:29:39 2021
@author: Clement
"""
import pandas
import numpy
import os
import sys
import geopandas as gpd
import tqdm
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from gen_fct import df_fct
from gen_fct import file_fct
class WorldDataSet:
def __init__ (self):
self.df_cases = pandas.DataFrame()
self.df_death = pandas.DataFrame()
self.df_fra = pandas.DataFrame()
self.df_world = | pandas.DataFrame() | pandas.DataFrame |
"""
Outputing CAM of tiles
Created on 04/21/2020
@author: RH
"""
import argparse
import os
import numpy as np
import cv2
import pandas as pd
import tensorflow as tf
import Panoptes1
import saliency
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
parser = argparse.ArgumentParser()
parser.add_argument('--dirr', type=str, default='trial', help='output directory')
parser.add_argument('--classes', type=int, default=2, help='number of classes to predict')
parser.add_argument('--pdmd', type=str, default='tumor', help='feature to predict')
parser.add_argument('--imgfile',type=str, default=None,
help='load the image (eg. CCRCC/C3L-SSSSS-SS,CCRCC/C3L-SSSSS-SS)')
parser.add_argument('--modeltoload', type=str, default='', help='reload trained model')
parser.add_argument('--metadir', type=str, default='', help='reload trained model')
opt = parser.parse_args()
dirr = opt.dirr
classes = opt.classes
pdmd = opt.pdmd
imgfile = opt.imgfile
modeltoload = opt.modeltoload
metadir = opt.metadir
# image to double
def im2double(im):
return cv2.normalize(im.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
# image to jpg
def py_map2jpg(imgmap):
heatmap_x = np.round(imgmap*255).astype(np.uint8)
return cv2.applyColorMap(heatmap_x, cv2.COLORMAP_JET)
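# Illustrative use of the two helpers above (comment only), assuming `cam` is a
# single-channel activation map:
#   cam01 = im2double(cam)      # rescaled to the [0.0, 1.0] range
#   heat = py_map2jpg(cam01)    # 8-bit BGR heat map via the JET colour map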
def inference(xa_in_re, xb_in_re, xc_in_re, num_classes):
logits, nett, ww = Panoptes1.Panoptes1(xa_in_re, xb_in_re, xc_in_re,
num_cls=num_classes,
is_train=False,
dropout=0.5,
scope='Panoptes1')
return logits, nett
if __name__ == "__main__":
try:
os.mkdir('../Results/'+dirr)
except FileExistsError:
pass
try:
os.mkdir('../Results/'+dirr+'/saliency')
except FileExistsError:
pass
if not os.path.isfile('../Results/' + dirr + '/level1/dict.csv'):
tumor = imgfile.split('/')[0]
slideID = imgfile.split("-")[-1]
patientID = imgfile.rsplit("-", 1)[0].split('/')[-1]
os.symlink("../../tiles/" + tumor + "/" + patientID + "/" + slideID + '/level1', '../Results/'+dirr + '/level1',
target_is_directory=True)
tiles = | pd.read_csv('../Results/' + dirr + '/level1/dict.csv') | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/kaggle_rfcx-species-audio-detection.ipynb (unless otherwise specified).
__all__ = ['audio_augment', 'train', 'post_processing', 'ensemble', 'get_preds', 'test', 'main']
# Cell
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from fastprogress import progress_bar
from IPython.core.debugger import set_trace
import gc
from fastscript import *
from fastcore.all import *
from fastai.vision.all import *
from ..core import *
from ..audio.core import *
from ..audio.augmentations import *
from ..audio.dataset import *
from ..vision.models import *
from ..vision.losses import *
from ..audio.util import *
# Cell
def audio_augment(sample_rate, p=0.25):
return Pipeline([
ClippingDistortion(sample_rate, max_percentile_threshold=10, p=p),
PitchShift(sample_rate, min_semitones=-8, max_semitones=8, p=p),
])
# Cell
def train(sample_rate, num_classes, fold, n_epochs, lr, wd, tile_width, bs, aug_ps,
model_name, loss_func, plot, load_checkpoint=None, lr_find=False, head_ps=0.8,
mixup=False, n_mels=128, hop_length=512, model_arch='resnest50'):
seed_everything()
cbs = []
path = Path('/kaggle/kaggle_rainforest_audio/data')
rename_cols = RenameColumns(id='recording_id', label='species_id', tmin='t_min',
tmax='t_max',fmin='f_min', fmax='f_max')
df = Pipeline([load_dataframe, rename_cols, group_labels])(path/'train_tp.csv')
train_df, valid_df = kfold_dataframes(df, fold)
tfms = partial(apply_augmentations, augs_pipeline=audio_augment(sample_rate, p=aug_ps))
train_data = Datasets(items=train_df, tfms=partial(create_dataset_item, path=path,
sample_rate=sample_rate, tile_width=tile_width,
n_mels=n_mels, hop_length=hop_length))
valid_data = Datasets(items=valid_df, tfms=partial(create_dataset_item, path=path,
sample_rate=sample_rate, tile_width=tile_width,
n_mels=n_mels, hop_length=hop_length))
train_dl = DataLoader(
train_data, bs=bs, do_batch=reorganize_batch, shuffle=True,
num_workers=8, after_item=tfms,
after_batch=MelSpectrogram(sample_rate,n_mels=n_mels,hop_length=hop_length))
valid_dl = DataLoader(
valid_data, bs=bs, do_batch=reorganize_batch, num_workers=8,
after_batch=MelSpectrogram(sample_rate, n_mels=n_mels,hop_length=hop_length))
dls = DataLoaders(train_dl, valid_dl)
dls.device = torch.device("cuda:0")
if plot:
xb, yb = dls.one_batch()
show_augmentations(train_data, train_dl, sample_rate=sample_rate)
model = get_model(model_arch, num_classes=num_classes, head_ps=head_ps, in_channels=1)
if mixup:
cbs.append(MixUp(0.4))
loss_func += '_mixup'
def before_loss(x,y):
x,y=mask2category(x,y)
return x, y
def after_loss(loss, y):
return loss
loss = get_loss(loss_func, before=before_loss, after=after_loss)
print('Loss function: ', loss_func)
learn = Learner(dls, model, loss_func=loss, metrics=[accuracy, lrap], cbs=cbs)
learn.to_fp16(clip=0.5);
if load_checkpoint is not None:
learn.load(path.parent/f'models/{load_checkpoint}_fold{fold}')
print('Load model ', path.parent/f'models/{load_checkpoint}_fold{fold}')
if lr_find: learn.lr_find()
learn.fit_one_cycle(n_epochs, lr, wd=wd, div_final=10, div=10)
learn.save(path.parent/f'models/{model_name}_fold{fold}')
print(f'Model saved to', path.parent/f'models/{model_name}_fold{fold}')
# Cell
def post_processing(df, path_save, model_name, tile_width, MODE=2):
"""
Post processing idea by <NAME> shared at
https://www.kaggle.com/c/rfcx-species-audio-detection/discussion/220389
"""
# USE MODE 1, 2, or 3
# LOAD SUBMISSION
FUDGE = 2.0
for k in range(24):
df.iloc[:,1+k] -= df.iloc[:,1+k].min()
df.iloc[:,1+k] /= df.iloc[:,1+k].max()
# CONVERT PROBS TO ODDS, APPLY MULTIPLIER, CONVERT BACK TO PROBS
def scale(probs, factor):
probs = probs.copy()
idx = np.where(probs!=1)[0]
odds = factor * probs[idx] / (1-probs[idx])
probs[idx] = odds/(1+odds)
return probs
# TRAIN AND TEST MEANS
d1 = df.iloc[:,1:].mean().values
d2 = np.array([113,204,44,923,53,41,3,213,44,23,26,149,255,14,123,222,46,6,474,4,17,18,23,72])/1000.
for k in range(24):
if MODE==1: d = FUDGE
if MODE==2: d = d1[k]/(1-d1[k])
if MODE==3: s = d2[k] / d1[k]
else: s = (d2[k]/(1-d2[k]))/d
df.iloc[:,k+1] = scale(df.iloc[:,k+1].values,s)
df.to_csv(path_save/f'submission_with_pp_{model_name}_{tile_width}.csv',index=False)
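# Worked example of the odds re-scaling used above (comment only, illustrative numbers):
# a probability p = 0.2 has odds p / (1 - p) = 0.25; multiplying the odds by s = 2 gives
# 0.5, which maps back to a probability of 0.5 / 1.5 = 1/3, so
#   scale(np.array([0.2]), 2.0) -> array([0.3333...])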
def ensemble(files):
dfs = [ | pd.read_csv(f) | pandas.read_csv |
import asyncio
import json
import logging
import math
import os
import sys
from argparse import ArgumentParser
from concurrent.futures import CancelledError
from csv import DictWriter
from datetime import date
from io import StringIO
from itertools import chain
from pathlib import Path
from urllib.parse import urlencode
import aiofiles
import aiohttp
import pandas as pd
import numpy as np
from geopy.distance import vincenty
from decouple import config  # assumed source of config(); supplies GOOGLE_API_KEY below
DTYPE = dict(cnpj=np.str, cnpj_cpf=np.str)
LOG_FORMAT = '[%(levelname)s] %(asctime)s: %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)
class GooglePlacesURL:
BASE_URL = 'https://maps.googleapis.com/maps/api/place/'
def __init__(self, key):
self.key = key
def url(self, endpoint, query=None, format='json'):
"""
:param endpoint: (str) Google Places API endpoint name (e.g. details)
:param query: (tuple) tuples with key/values pairs for the URL query
:param format: (str) output format (default is `json`)
:return: (str) URL to do an authenticated Google Places request
"""
key = ('key', self.key)
query = tuple(chain(query, (key,))) if query else (key)
parts = (
self.BASE_URL,
endpoint,
'/{}?'.format(format),
urlencode(query)
)
return ''.join(parts)
def details(self, place):
"""
:param place: (int or str) ID of the place in Google Place
:return: (str) URL to do a place details Google Places search
"""
query = (('placeid', place),)
return self.url('details', query)
def nearby(self, keyword, location):
"""
:param keywork: (str) category to search places
:return: (str) URL to do a nearby Google Places search
"""
query = (
('location', location),
('keyword', keyword),
('rankby', 'distance'),
)
return self.url('nearbysearch', query)
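# Illustrative URL construction (comment only; the key and coordinates are made up):
#   GooglePlacesURL('FAKE-KEY').nearby('motel', '-23.55,-46.63')
#   -> 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=-23.55%2C-46.63&keyword=motel&rankby=distance&key=FAKE-KEY'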
class SexPlacesNearBy:
KEYWORDS = ('acompanhantes',
'adult entertainment club',
'adult entertainment store',
'gay sauna',
'massagem',
'modeling agency',
'motel',
'night club',
'sex club',
'sex shop',
'strip club',
'swinger clubs')
def __init__(self, company, key=None):
"""
:param company: (dict) Company with name, cnpj, latitude and longitude
:param key: (str) Google Places API key
"""
self.url = GooglePlacesURL(key or config('GOOGLE_API_KEY'))
self.company = company
self.latitude = self.company['latitude']
self.longitude = self.company['longitude']
self.places = []
self.closest = None
@property
def company_name(self):
return self.company.get('trade_name') or self.company.get('name')
@property
def valid(self):
try:
coords = map(float, (self.latitude, self.longitude))
except ValueError:
return False
if any(map(math.isnan, coords)):
return False
return True
async def get_closest(self):
"""
Start the requests to store a place per self.KEYWORD in self.places.
Then gets the closest place found, queries for its details and returns
a dict with the details for that place.
"""
if not self.valid:
msg = 'No geolocation information for company: {} ({})'
logging.info(msg.format(self.company_name, self.company['cnpj']))
return None
tasks = [self.load_place(k) for k in self.KEYWORDS]
await asyncio.gather(*tasks)
ordered = sorted(self.places, key=lambda x: x['distance'])
for place in ordered:
place = await self.load_details(place)
name = place.get('name', '').lower()
if place.get('keyword') == 'motel' and 'hotel' in name:
pass # google returns hotels when looking for a motel
else:
prefix = '💋 ' if place.get('distance') < 5 else ''
msg = '{}Found something interesting {:.2f}m away from {}…'
args = (prefix, place.get('distance'), self.company_name)
logging.info(msg.format(*args))
self.closest = place
return place
async def load_place(self, keyword, print_status=False):
"""
Given a keyword it loads the place returned by the API to self.places.
"""
if print_status:
msg = 'Looking for a {} near {} ({})…'
args = (keyword, self.company_name, self.company.get('cnpj'))
logging.info(msg.format(*args))
location = '{},{}'.format(self.latitude, self.longitude)
url = self.url.nearby(keyword, location)
try:
response = await aiohttp.request('GET', url)
except aiohttp.TimeoutError:
logging.info('Timeout raised for {}'.format(url))
else:
content = await response.text()
place = self.parse(keyword, content)
if place and isinstance(place.get('distance'), float):
self.places.append(place)
def parse(self, keyword, content):
"""
Return a dictionary with information of the nearest sex place
around a given company.
:param keyword: (str) keyword used by the request
:param content: (str) content of the response to the request
:return: (dict) with
* name : The name of nearest sex place
* latitude : The latitude of nearest sex place
* longitude : The longitude of nearest sex place
* distance : Distance (in meters) between the company and the
nearest sex place
* address : The address of the nearest sex place
* phone : The phone of the nearest sex place
* id : The Google Place ID of the nearest sex place
* keyword : term that matched the sex place in Google Place Search
Google responses:
* `OK` indicates that no errors occurred; the place was
successfully detected and at least one result was returned.
* `UNKNOWN_ERROR` indicates a server-side error; trying again may
be successful.
* `ZERO_RESULTS` indicates that the reference was valid but no
longer refers to a valid result. This may occur if the
establishment is no longer in business.
* `OVER_QUERY_LIMIT` indicates that you are over your quota.
* `REQUEST_DENIED` indicates that your request was denied,
          generally because the key parameter is missing or invalid.
* `INVALID_REQUEST` generally indicates that the query (reference)
is missing.
* `NOT_FOUND` indicates that the referenced location was not found
          in the Places database.
        Source: https://developers.google.com/places/web-service/details
"""
response = json.loads(content)
status = response.get('status')
if status != 'OK':
if status in ('OVER_QUERY_LIMIT', 'REQUEST_DENIED'):
shutdown() # reached API limit or API key is missing/wrong
if status != 'ZERO_RESULTS':
error = response.get('error', '')
msg = 'Google Places API Status: {} {}'.format(status, error)
logging.info(msg.strip())
return None
place, *_ = response.get('results', [{}])
location = place.get('geometry', {}).get('location', {})
latitude = float(location.get('lat'))
longitude = float(location.get('lng'))
company_location = (self.latitude, self.longitude)
place_location = (latitude, longitude)
distance = vincenty(company_location, place_location)
return {
'id': place.get('place_id'),
'keyword': keyword,
'latitude': latitude,
'longitude': longitude,
'distance': distance.meters,
'cnpj': self.company.get('cnpj'),
'company_name': self.company.get('name'),
'company_trade_name': self.company.get('trade_name')
}
async def load_details(self, place):
"""
:param place: dictionary with id key.
:return: dictionary updated with name, address and phone.
"""
place_id = place.get('id')
if not place_id:
return place
# request place details
try:
response = await aiohttp.request('GET', self.url.details(place_id))
except aiohttp.TimeoutError:
            logging.info('Timeout raised for place {}'.format(place_id))
return place
else:
content = await response.text()
# parse place details
try:
details = json.loads(content)
except ValueError:
return place
else:
if not details:
return place
result = details.get('result', {})
place.update(dict(
name=result.get('name', ''),
address=result.get('formatted_address', ''),
phone=result.get('formatted_phone_number', '')
))
return place
async def write_to_csv(path, place=None, **kwargs):
"""
Receives a given place (dict) and writes it in the CSV format into path.
CSV headers are defined in `fields`. The named argument `headers`
(bool) determines if the functions write the CSV header or not.
"""
headers = kwargs.get('headers', False)
if not place and not headers:
return
fields = (
'id', 'keyword', 'latitude', 'longitude', 'distance', 'name',
'address', 'phone', 'cnpj', 'company_name', 'company_trade_name'
)
with StringIO() as obj:
writer = DictWriter(obj, fieldnames=fields, extrasaction='ignore')
if headers:
writer.writeheader()
if place:
writer.writerow(place)
async with aiofiles.open(path, mode='a') as fh:
await fh.write(obj.getvalue())
async def fetch_place(company, output, semaphore):
"""
Gets a company (dict), finds the closest place nearby and write the result
to a CSV file.
"""
with (await semaphore):
places = SexPlacesNearBy(company)
await places.get_closest()
if places.closest:
await write_to_csv(output, places.closest)
async def main_coro(companies, output, max_requests):
"""
:param companies: (Pandas DataFrame)
:param output: (str) Path to the CSV output
:param max_requests: (int) max parallel requests
"""
# write CSV headers
if is_new_dataset(output) and not companies.empty:
await write_to_csv(output, headers=True)
semaphore = asyncio.Semaphore(max_requests // 13) # 13 reqs per company
tasks = []
logging.info("Let's get started!")
# write CSV data
for company_row in companies.itertuples(index=True):
company = dict(company_row._asdict()) # _asdict() returns OrderedDict
tasks.append(fetch_place(company, output, semaphore))
await asyncio.wait(tasks)
def find_newest_file(pattern='*.*', source_dir='.'):
"""
Assuming that the files will be in the form of:
yyyy-mm-dd-type-of-file.xz we can try to find the newest file
based on the date.
"""
files = sorted(Path(source_dir).glob(pattern))
if not files:
return None
file = files.pop()
if not file:
return None
return str(file)
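# Illustrative lookup (comment only): with data/2017-01-01-companies.xz and
# data/2017-06-30-companies.xz present, find_newest_file('*companies.xz', 'data')
# returns 'data/2017-06-30-companies.xz', since lexical sorting of the yyyy-mm-dd
# prefix matches chronological order.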
def load_newest_dataset(pattern, usecols, na_value=''):
filepath = find_newest_file(pattern)
if not filepath:
return None
logging.info('Loading {}'.format(filepath))
dataset = pd.read_csv(
filepath,
dtype=DTYPE,
low_memory=False,
usecols=usecols
)
dataset = dataset.fillna(value=na_value)
return dataset
def get_companies(companies_path, **kwargs):
"""
Compares YYYY-MM-DD-companies.xz with the newest
YYYY-MM-DD-sex-place-distances.xz and returns a DataFrame with only
the rows matching the search criteria, excluding already fetched companies.
Keyword arguments are expected: term (int), value (float) and city (str)
"""
filters = tuple(map(kwargs.get, ('term', 'value', 'city')))
if not all(filters):
raise TypeError('get_companies expects term, value and city as kwargs')
term, value, city = filters
# load companies
cols = ('cnpj', 'trade_name', 'name', 'latitude', 'longitude', 'city')
companies = load_newest_dataset(companies_path, cols)
companies['cnpj'] = companies['cnpj'].str.replace(r'\D', '')
# load & fiter reimbursements
cols = ('total_net_value', 'cnpj_cpf', 'term')
reimbursements = load_newest_dataset('data/*-reimbursements.xz', cols)
query = '(term == {}) & (total_net_value >= {})'.format(term, value)
reimbursements = reimbursements.query(query)
# load & filter companies
on = dict(left_on='cnpj', right_on='cnpj_cpf')
companies = pd.merge(companies, reimbursements, **on)
del(reimbursements)
companies.drop_duplicates('cnpj', inplace=True)
query = 'city.str.upper() == "{}"'.format(city.upper())
companies = companies.query(query)
# clean up companies
del(companies['cnpj_cpf'])
del(companies['term'])
del(companies['total_net_value'])
del(companies['city'])
# load sexplaces & filter remaining companies
cols = ('cnpj', )
sex_places = load_newest_dataset('data/*-sex-place-distances.xz', cols)
if sex_places is None or sex_places.empty:
return companies
return companies[~companies.cnpj.isin(sex_places.cnpj)]
def is_new_dataset(output):
sex_places = find_newest_file('*sex-place-distances.xz', 'data')
if not sex_places:
return True
# convert previous database from xz to csv
pd.read_csv(sex_places, dtype=DTYPE).to_csv(output, index=False)
os.remove(sex_places)
return False
def convert_to_lzma(csv_output, xz_output):
uncompressed = | pd.read_csv(csv_output, dtype=DTYPE) | pandas.read_csv |
import seaborn as sns
import pandas as pd
import matplotlib
from ipdb import set_trace as tt
# matplotlib.use('AGG') # or PDF, SVG, or PS
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as plt
import json
import os
import os.path as osp
import numpy as np
DIV_LINE_WIDTH = 50
# Global vars for tracking and labeling data at load time.
exp_idx = 0
units = dict()
def plot_data_ours(data, environments, xaxis='Epoch', value="AverageEpRet", condition="Condition1", smooth=1, **kwargs):
# plot_main
if smooth > 1:
"""
smooth data with moving window average.
that is,
smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
where the "smooth" param is width of that window (2k+1)
"""
y = np.ones(smooth)
for datum in data:
x = np.asarray(datum[value])
z = np.ones(len(x))
smoothed_x = np.convolve(x,y,'same') / np.convolve(z,y,'same')
datum[value] = smoothed_x
if isinstance(data, list):
data = pd.concat(data, ignore_index=True)
try:
datastd = data.groupby(['Epoch', 'Condition1', 'Condition3']).std().reset_index()
datamean = data.groupby(['Epoch', 'Condition1', 'Condition3']).mean().reset_index()
except KeyError:
pass
plot_data20 = {}
plot_data50 = {}
for e in environments:
std = datastd[datastd.Condition1==e][datastd.Condition3==str(20)].AverageEpRet.tolist()
mean = datamean[datamean.Condition1==e][datamean.Condition3==str(20)].AverageEpRet.tolist()
plot_data20[e] = np.array([std, mean])
std = datastd[datastd.Condition1 == e][datastd.Condition3 == str(50)].AverageEpRet.tolist()
mean = datamean[datamean.Condition1 == e][datamean.Condition3 == str(50)].AverageEpRet.tolist()
plot_data50[e] = np.array([std, mean])
return plot_data20, plot_data50
def data_with_suffix(root, suffix, environments, axis='TotalEnvInteracts'):
if root[-1] != '/':
root += "/"
#logdirs = [root for env in environments]
logdirs = [root+env for env in environments]
res = {}
for env, dir in zip(environments, logdirs):
print('dir',dir)
df_env = pd.concat(get_datasets(dir), ignore_index=True)
RR = df_env.groupby(axis)
std = df_env.groupby(axis).std().reset_index().AverageEpRet
df_mean = df_env.groupby(axis).mean().reset_index()
mean = df_mean.AverageEpRet
xaxis = df_mean[axis]
res[env] = np.array([xaxis, mean, std])
return res
def get_datasets(logdir, condition=None):
"""
Recursively look through logdir for output files produced by
spinup.logx.Logger.
Assumes that any file "progress.txt" is a valid hit.
"""
global exp_idx
global units
if condition and '-' in condition:
condition, condition3 = condition.split("-")
else:
condition3 = ""
datasets = []
for root, _, files in os.walk(logdir):
if 'progress.txt' in files:
exp_name = None
try:
config_path = open(os.path.join(root,'config.json'))
config = json.load(config_path)
if 'exp_name' in config:
exp_name = config['exp_name']
except:
print('No file named config.json')
condition1 = condition or exp_name or 'exp'
condition2 = condition1 + '-' + str(exp_idx)
exp_idx += 1
if condition1 not in units:
units[condition1] = 0
unit = units[condition1]
units[condition1] += 1
exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
exp_data.insert(len(exp_data.columns),'Unit',unit)
exp_data.insert(len(exp_data.columns),'Condition1',condition1)
exp_data.insert(len(exp_data.columns),'Condition2',condition2)
exp_data.insert(len(exp_data.columns),'Condition3', condition3)
exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance])
datasets.append(exp_data)
# print(exp_data)
return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
"""
For every entry in all_logdirs,
1) check if the entry is a real directory and if it is,
pull data from it;
2) if not, check to see if the entry is a prefix for a
real directory, and pull data from that.
"""
logdirs = []
for logdir in all_logdirs:
if osp.isdir(logdir) and logdir[-1]==os.sep:
logdirs += [logdir]
else:
basedir = osp.dirname(logdir)
fulldir = lambda x : osp.join(basedir, x)
prefix = logdir.split(os.sep)[-1]
listdir= os.listdir(basedir)
logdirs += sorted([fulldir(x) for x in listdir if prefix in x])
"""
Enforce selection rules, which check logdirs for certain substrings.
Makes it easier to look at graphs from particular ablations, if you
launch many jobs at once with similar names.
"""
if select is not None:
logdirs = [log for log in logdirs if all(x in log for x in select)]
if exclude is not None:
logdirs = [log for log in logdirs if all(not(x in log) for x in exclude)]
# Verify logdirs
print('Plotting from...\n' + '='*DIV_LINE_WIDTH + '\n')
for logdir in logdirs:
print(logdir)
print('\n' + '='*DIV_LINE_WIDTH)
# Make sure the legend is compatible with the logdirs
assert not(legend) or (len(legend) == len(logdirs)), \
"Must give a legend title for each set of experiments."
# Load data from logdirs
data = []
if legend:
for log, leg in zip(logdirs, legend):
data += get_datasets(log, leg)
else:
for log in logdirs:
data += get_datasets(log)
return data
def plot_data(data_to_plot, environments):
num_env = len(environments)
legends = data_to_plot.keys()
#colors = ['red', 'blue', 'teal', 'green', 'green', 'lightskyblue']
colors = ['blue', 'red']
figure = plt.figure(figsize=(30, 5))
def million_formatter(x, pos):
return '%.1fM' % (x * 1e-6)
formatter = FuncFormatter(million_formatter)
#axs = [plt.subplot(161 ), plt.subplot(162 ), plt.subplot(163),
# plt.subplot(164 ), plt.subplot(165 ), plt.subplot(166 )]
axs = [plt.subplot(151 ), plt.subplot(152 ), plt.subplot(153 ), plt.subplot(154 ), plt.subplot(155 )]
font = font1 = {'family': 'Times New Roman',
'weight': 'normal',
'size': 20,
}
for i, ax in enumerate(axs):
ax.set_facecolor((0.95, 0.95, 0.95))
ax.xaxis.set_major_formatter(formatter)
ax.set_xlabel(environments[i], fontdict=font)
#ours = data_to_plot['Ours'][environments[i]][1]
#max_PPO = max(ppo)
#z = 0
#while ppo[z] < 0.2 * max_PPO:
# z+=1
# print(z)
#Contrast = data_to_plot["PPO"][environments[i]][1]
#ACFD = data_to_plot["ACFD"][environments[i]][1]
#mm = 0
#while our[mm] < 0.2 * max_PPO:
# mm += 1
# print(mm)
#print(environments[i], mm / z)
ax.grid(color='white', linestyle='-', linewidth=2, alpha=0.6)
for j, k in enumerate(data_to_plot.keys()):
method = data_to_plot[k]
x = method[environments[i]][0]
y = method[environments[i]][1]
std = method[environments[i]][2]
cut = 10000
x, y ,std = x[:cut], y[:cut], std[:cut]
ax.plot(x, y, color=colors[j], linewidth=2, label=k)
ax.fill_between(x, y+std, y-std, facecolor=colors[j], alpha=0.3)
ax.legend(ncol=1, fontsize=11)
plt.plot()
plt.show()
def make_plots(root_path, environments, legend=None, xaxis=None, values=None, count=False,
font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean'):
# all_data_dict = {}
all_env_path = []
legend = []
for e in environments:
# all_data_dict[e] = {}
for k in [100, 200, 500]:
acdf_suffix = 'ACDF_pi{}_vf{}'.format(k, k)
all_env_path.append(root_path+e+acdf_suffix)
legend.append(e+'-'+str(k))
# all_data_dict[e][k] = get_all_datasets(env_path)
suffix = [] #, 'ACDF_pi50_vf50']
data_to_plot = {}
# data_to_plot['OursWithReward5'] = data_with_suffix(root_path, 'ACDF_pi50_vf50', environments)
#data_to_plot['WithDemon'] = data_with_suffix(root_path + 'PPO_With_Demons/', suffix='', environments=environments)
#data_to_plot['Integrated'] = data_with_suffix(root_path + 'Ours', suffix='', environments=environments)
#data_to_plot['Ours'] = data_with_suffix(root_path, 'ACDF_pi20_vf20', environments)
#data_to_plot['NoDemon'] = data_with_suffix(root_path + 'PPO_No_Demons/', suffix='', environments=environments)
data_to_plot['PPO'] = data_with_suffix(root_path + 'PPO_No_Demons', suffix='', environments=environments)
#data_to_plot['Pre-trained'] = data_with_suffix(root_path + 'ACfD', suffix='',environments=environments)
data_to_plot['Hierarchical Sampling'] = data_with_suffix(root_path + 'PPO_With_Demons', suffix='',environments=environments)
# data_to_plot['20'] = data_with_suffix(root_path + 'no_reward/', suffix='ACDF_pi50_vf50', environments=environments)
# data_to_plot['50'] = data_with_suffix(root_path, suffix='ACDF_pi50_vf50', environments=environments)
# data_to_plot['100'] = data_with_suffix(root_path, suffix='ACDF_pi20_vf20', environments=environments)
# data_to_plot['200'] = data_with_suffix(root_path, suffix='ACDF_pi200_vf200', environments=environments)
# data_to_plot['500'] = data_with_suffix(root_path, suffix='ACDF_pi500_vf500', environments=environments)
# import pickle
# with open("data_to_plot.pickle", "wb") as fp:
# pickle.dump(data_to_plot, fp, protocol=pickle.HIGHEST_PROTOCOL)
# with open("data_to_plot.pickle", "rb") as fp:
# data_to_plot = pickle.load(fp)
table_dict = {}
for m in data_to_plot:
print(m)
table_dict[m] = []
for env in data_to_plot[m]:
print(env)
tmp = data_to_plot[m][env]
# table_dict[m].append((tmp[1][0], tmp[2][0]))
index = np.argmax(tmp[1])
table_dict[m].append((tmp[1][index], tmp[2][index]))
df = | pd.DataFrame(table_dict) | pandas.DataFrame |
import pandas as pd
import abc
from typing import Iterable
from datetime import datetime
class TableWriter(abc.ABC):
@abc.abstractmethod
def __init__(self, rows: Iterable[Iterable]) -> None:
pass
@abc.abstractmethod
def set_rows(self, rows: Iterable[Iterable]) -> None:
pass
@abc.abstractmethod
def write_to_file(self, file_path: str, header_row: Iterable[str]):
pass
class ExcelWriter(TableWriter):
DEFAULT_FILE_PATH: str = f"./saved_documents/{datetime.now().strftime('%Y-%m-%dT%H-%M-%S')}.xlsx"
def __init__(self, rows: Iterable[Iterable]) -> None:
super().__init__(rows)
self.__rows: Iterable[Iterable] = rows
self.__dataframe: pd.DataFrame = | pd.DataFrame(data=rows) | pandas.DataFrame |
import pandas as pd
import datetime
import pytz
class TimeSeries:
@classmethod
def align_timezone(cls, time_series: pd.Series, tzinfo: [str, pytz.timezone]):
"""
Sometimes a time_series is of pandas type "object" just because the time-zone information
is not well read initially.
Example :
time_series = a time_series with some times at timezone +01:00 (French winter time)
and others at timezone +02:00 (French summer time)
So its pandas dtype is "object"
tz = pytz.timezone("Europe/Paris")
We want to make sure the timezone information is tzinfo for each row and also for the series.
:param time_series: a series with tz-aware date-times
:param tzinfo: a str or a datetime.tzinfo
:return: a DatetimeIndex of dtype: datetime[ns, tzinfo]
"""
if not isinstance(time_series, pd.Series) and not isinstance(time_series, pd.DatetimeIndex):
raise TypeError("parameter 'series' should be a pandas.Series")
if isinstance(tzinfo, str):
tzinfo = pytz.timezone(tzinfo)
if not isinstance(tzinfo, datetime.tzinfo):
raise TypeError("parameter 'tzinfo' should be of type str or datetime.tzinfo")
result = time_series.map(lambda x: x.astimezone(tzinfo))
# now all values in 'result' have the same type: pandas.Timestamp with the same tzinfo
result = pd.DatetimeIndex(result)
return result
@classmethod
def find_missing_and_extra_periods(
cls,
dti,
expected_freq=None,
expected_start_datetime=None,
expected_end_datetime=None):
"""
Check for missing and extra data points
:param dti: a series of type DatetimeIndex, in ascending order and without duplicates or NaNs
:param expected_freq: pandas formatted frequency. If None is given, will infer the frequency, taking the
most common gap between 2 consecutive points.
:param expected_start_datetime: a pandas.Datetime, if None is given, will take dti[0]
:param expected_end_datetime: a pandas.Datetime, if None is given, will take dti[-1]
:return: Missing and extra data points collapsed in periods
"""
if not isinstance(dti, pd.DatetimeIndex):
raise TypeError("parameter 'dti' should be a pandas.DatetimeIndex")
if dti.isna().sum() != 0:
raise ValueError("given dti has NaN values.")
if dti.duplicated().sum() != 0:
raise ValueError("given dti has duplicates.")
if not dti.equals(dti.sort_values(ascending=True)):
raise ValueError("given dti is not in ascending order")
if len(dti) == 0:
raise ValueError("given dti is empty")
if expected_start_datetime is not None:
if not isinstance(expected_start_datetime, pd.Timestamp):
raise TypeError("expected_start_datetime must be a pandas.Datetime")
start = expected_start_datetime
else:
start = dti[0]
if expected_end_datetime is not None:
if not isinstance(expected_end_datetime, pd.Timestamp):
raise TypeError("expected_end_datetime must be a pandas.Datetime")
end = expected_end_datetime
else:
end = dti[-1]
if expected_freq is not None:
if not isinstance(expected_freq, str) and not isinstance(expected_freq, pd.Timedelta):
raise TypeError("expected_freq should be str or pd.Timedelta")
freq = expected_freq
else:
# Infer frequency
# compute the gap distribution
gap_dist = (pd.Series(dti[1:]) - pd.Series(dti[:-1])).value_counts()
# gap_dist =
# 01:00:00 1181
# 02:00:00 499
# 03:00:00 180
# ....
# take the most common gap and use it as expected_freq
freq = gap_dist.index[0] # this is a pd.Timedelta object
assert isinstance(freq, pd.Timedelta)
expected_index = pd.date_range(start, end, freq=freq)
missing_points = expected_index.difference(dti)
# group missing points together as "missing periods"
missing_periods = cls.collapse_dt_series_into_periods(missing_points, freq=freq)
extra_points = dti.difference(expected_index)
return freq, missing_periods, extra_points
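    # Illustrative outcome (comment only), e.g. for an hourly index covering 00:00-06:00
    # that is missing 02:00 and 03:00 and has one extra 04:30 point:
    #   freq            -> Timedelta('0 days 01:00:00')  (inferred most-common gap)
    #   missing_periods -> [(Timestamp('.. 02:00'), Timestamp('.. 03:00'))]
    #   extra_points    -> DatetimeIndex(['.. 04:30'])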
@classmethod
def collapse_dt_series_into_periods(
cls,
dti: pd.DatetimeIndex,
freq: [str, pd.Timedelta]
):
"""
This function does not work if freq < 1s
:param freq:
:param dti: DatetimeIndex, must be sorted
:return:
"""
assert isinstance(dti, pd.DatetimeIndex)
assert isinstance(freq, str) or isinstance(freq, pd.Timedelta)
if pd.to_timedelta(freq).total_seconds() < 1.0:
raise ValueError("freq must be more than 1 second, but given {}".format(freq))
if dti.shape[0] == 0:
return []
current_period_start = dti[0]
periods_list = []
for i in range(1, dti.shape[0]):
if dti[i] <= dti[i-1]:
raise ValueError("dti must be sorted and without duplicates!")
if (dti[i] - dti[i-1]).total_seconds() % pd.to_timedelta(freq).total_seconds() != 0:
raise ValueError("Timedelta between {} and {} is not a multiple of freq ({})."
.format(dti[i-1], dti[i], freq))
if pd.to_timedelta(freq) != dti[i] - dti[i-1]: # End the current period and start a new one
periods_list.append((current_period_start, dti[i-1]))
current_period_start = dti[i]
periods_list.append((current_period_start, dti[-1])) # Don't forget last period
return periods_list
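    # Illustrative collapse (comment only): with freq='H', the sorted points
    # 00:00, 01:00, 02:00, 05:00 group into [(00:00, 02:00), (05:00, 05:00)],
    # i.e. consecutive points merge into one period and an isolated point
    # becomes a one-point period.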
@staticmethod
def interpolate_daily_to_sub_daily_data(
df: pd.DataFrame,
freq: [str, pd.Timedelta],
tz: [str, datetime.tzinfo],
index_name: str = 'time',
method: str = 'ffill'):
"""
Interpolate daily data in a dataframe (with a DatetimeIndex) to sub-daily data using a given method.
:param df: pd.DataFrame
:param freq: a frequency < 'D' (e.g. 'H', '30min', '15min', etc)
:param tz: the time zone (None not accepted because important)
:param index_name: name to give to the new index. Usually going from 'date' to 'time'.
:param method: how are data interpolated between two consecutive dates (e.g. 'ffill', 'linear', etc)
:return: pd.DataFrame
"""
assert type(df.index) == pd.DatetimeIndex
assert | pd.to_timedelta(freq) | pandas.to_timedelta |
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
os.getcwd()
# Request for the filename
# Current version of this script works only with TSV type files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()
#Load the raw file with numpy first,
#then convert the array into a pandas DataFrame
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)
# Obtain the first row to check whether the header is string or numeric
headers = filenameData.iloc[0]
try:
| pd.to_numeric(headers) | pandas.to_numeric |
import pandas as pd
import numpy as np
def analyze_tkpm_results(file_dir, output_dir):
machine_legend = pd.read_excel(file_dir, sheet_name="machine_legend", index_col=0)
machine_legend.columns = ["machine_name"]
total_overtime_results = pd.read_excel(file_dir, sheet_name="A")
total_investment_results = pd.read_excel(file_dir, sheet_name="U")
total_shift_results = pd.read_excel(file_dir, sheet_name="L")
overtime_results = machine_legend
investment_results = machine_legend
shift_results = {x: machine_legend for x in range(1, 6)}
total_overtime_results = total_overtime_results[
total_overtime_results[total_overtime_results.columns[0]] == 2].iloc[:, 1:]
for i in range(1, 6):
temp_schedule = total_overtime_results[total_overtime_results[total_overtime_results.columns[1]] == i].copy()
temp_schedule.set_index(temp_schedule.columns[0], inplace=True)
temp_schedule.drop(temp_schedule.columns[0], axis=1, inplace=True)
temp_schedule = temp_schedule.sum(axis=1)
overtime_results = pd.concat([overtime_results, pd.DataFrame(temp_schedule, columns=[i])], axis=1).fillna(0)
overtime_results.sort_values(overtime_results.columns[0], inplace=True, ascending=True)
overtime_results.columns = [""] + list(overtime_results.columns[1:])
overtime_results.set_index(overtime_results.columns[0], inplace=True)
for i in range(1, 6):
temp_investment = total_investment_results[total_investment_results[total_investment_results.columns[1]] == i].copy()
temp_investment.set_index(temp_investment.columns[0], inplace=True)
temp_investment.drop(temp_investment.columns[0], axis=1, inplace=True)
temp_investment = temp_investment.sum(axis=1)
investment_results = pd.concat([investment_results, pd.DataFrame(temp_investment, columns=[i])], axis=1).fillna(
0)
investment_results.sort_values(investment_results.columns[0], inplace=True, ascending=True)
investment_results.columns = [""] + list(investment_results.columns[1:])
investment_results.set_index(investment_results.columns[0], inplace=True)
for i in range(1, 6):
temp_shift = total_shift_results[total_shift_results[total_shift_results.columns[1]] == i].copy()
temp_shift.set_index(temp_shift.columns[0], inplace=True)
temp_shift.drop(temp_shift.columns[0], axis=1, inplace=True)
shift_results[i] = pd.concat([shift_results[i], temp_shift], axis=1).fillna(0)
shift_results[i].sort_values(shift_results[i].columns[0], inplace=True, ascending=True)
shift_results[i].columns = [""] + list(shift_results[i].columns[1:])
shift_results[i].set_index(shift_results[i].columns[0], inplace=True)
with pd.ExcelWriter(output_dir + "/TKPM_Analysis.xlsx") as writer:
overtime_results.to_excel(writer, sheet_name="Fazla Mesai")
investment_results.to_excel(writer, sheet_name="Yatırım")
for i in range(1, 6):
shift_results[i].to_excel(writer, sheet_name="Vardiya_Senaryo_" + str(i))
def analyze_okpm_results(file_dir, output_dir):
machine_legend = pd.read_excel(file_dir, sheet_name="machine_legend", index_col=0)
machine_legend.columns = ["machine_name"]
results = pd.read_excel(file_dir, sheet_name="A")
results = results[results[results.columns[0]] == 2].iloc[:, 1:]
results.set_index(results.columns[0], inplace=True)
results = pd.concat([machine_legend, results], axis=1).fillna(0)
results.sort_values(results.columns[0], inplace=True, ascending=True)
results.columns = [""] + list(results.columns[1:])
results.set_index(results.columns[0], inplace=True)
with pd.ExcelWriter(output_dir + "/OKPM_Analysis.xlsx") as writer:
results.to_excel(writer, sheet_name="<NAME>")
def analyze_okpb_results(file_dir, output_dir):
current_part = ""
tallies = pd.DataFrame(columns=["Identifier", "Average", "Half Width", "Minimum", "Maximum", "Observations"])
nq = | pd.DataFrame(columns=["Identifier", "Average", "Half Width", "Minimum", "Maximum", "Final Value"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 14:24:56 2021
@author: <NAME>
Script created for determination of optimal power generation mix looking at
interannual power production variability of DK1 and DK2.
- Plots the generation mix as function of time
- Plots the average optimal capacity with standard deviation as function of
technology
Reads data for the period 2015-2019 dowloaded from
data.open-power-system-data.org
Capacity factor is determined using installed capacity per production type
data from www.transparency.entsoe.eu
"""
#%% Import and define
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
def annuity(n,r):
"""Calculate the annuity factor for an asset with lifetime n years and
discount rate of r, e.g. annuity(20,0.05)*20 = 1.6"""
if r > 0:
return r/(1. - 1./(1.+r)**n)
else:
return 1/n
# Create network and snapshot
network = pypsa.Network()
hours_in_2015 = pd.date_range('2015-01-01T00:00Z','2015-12-31T23:00Z', freq='H')
hours_in_2016 = | pd.date_range('2016-01-01T00:00Z','2016-12-31T23:00Z', freq='H') | pandas.date_range |
#read a csv file, loading it into a DataFrame
import numpy as np #python's array proccesing / linear algebra library
import pandas as pd #data processing / stats library
import matplotlib.pyplot as plt #data visualization
import csv
#read in some data
fn = 'polling_data.csv'
df=pd.read_csv(fn)
#we can manually sets print options (lots of stuff like precision, max_colwidth avail)
pd.set_option('display.width', 500)
pd.set_option('display.max_rows', 5)
print("Here's what our data looks like:\n")
print(df.head(n=4))
#these are the indices auto-loaded:
print('\n')
print('row index:')
print(df.index)
print('\ncolumn index:')
print(df.columns)
print('\ncheck out the data types:')
pd.set_option('display.max_rows', 10)
print(df.dtypes)
#to select one or more columns, use slice notation
print('\n')
print(df["Datetime"])
#note that Datetime is not right (it came from Excel). Here's a fix:
df=df.assign(Datetime=pd.to_datetime('1899-12-30') + pd.to_timedelta(df['Datetime'], 'D'))
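#for example, the Excel serial number 43831 maps to Timestamp('2020-01-01') under this
#1899-12-30 offset (illustrative value, not taken from polling_data.csv)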
#df.pop("Datetime")
print('\nMake sure it "took":')
| pd.set_option('display.max_rows', 11) | pandas.set_option |
#!/usr/bin/env python3
import argparse
import csv
import gzip
import io
import json
import os
from os import walk
import shutil
import sys
import tempfile
from datetime import datetime
import pandas as pd
import pyarrow as pa
import itertools
from io import StringIO
from sys import getsizeof
import pickle
import singer
from jsonschema import Draft4Validator, FormatChecker
from target_s3 import s3
from target_s3 import utils
logger = singer.get_logger()
def write_temp_pickle(data={}):
temp_unique_pkl = 'temp_unique.pickle'
dir_temp_file = os.path.join(tempfile.gettempdir(), temp_unique_pkl)
with open(dir_temp_file, 'wb') as handle:
pickle.dump(data, handle)
def read_temp_pickle():
data = {}
temp_unique_pkl = 'temp_unique.pickle'
dir_temp_file = os.path.join(tempfile.gettempdir(), temp_unique_pkl)
if os.path.isfile(dir_temp_file):
with open(dir_temp_file, 'rb') as handle:
data = pickle.load(handle)
return data
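# Illustrative round trip (comment only; the pickle sits in the OS temp dir,
# e.g. /tmp/temp_unique.pickle on Linux):
#   write_temp_pickle({'id-1', 'id-2'})
#   read_temp_pickle()  # -> {'id-1', 'id-2'}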
# Upload created files to S3
def upload_to_s3(s3_client, s3_bucket, filename, stream, field_to_partition_by_time,
record_unique_field, compression=None, encryption_type=None, encryption_key=None):
data = None
df = None
final_files_dir = ''
with open(filename, 'r') as f:
data = f.read().splitlines()
df = pd.DataFrame(data)
df.columns = ['json_element']
df = df['json_element'].apply(json.loads)
df = pd.json_normalize(df)
logger.info('df orginal size: {}'.format(df.shape))
if df is not None:
if record_unique_field and record_unique_field in df:
unique_ids_already_processed = read_temp_pickle()
df = df[~df[record_unique_field].isin(unique_ids_already_processed)]
logger.info('df filtered size: {}'.format(df.shape))
df = df.drop_duplicates()
logger.info('df after drop_duplicates size: {}'.format(df.shape))
# df = df.groupby(record_unique_field).first().reset_index()
# logger.info('df first record of each unique_id size: {}'.format(df.shape))
new_unique_ids = set(df[record_unique_field].unique())
logger.info('unique_ids_already_processed: {}, new_unique_ids: {}'.format(
len(unique_ids_already_processed), len(new_unique_ids)))
unique_ids_already_processed = set(unique_ids_already_processed).union(new_unique_ids)
write_temp_pickle(unique_ids_already_processed)
df = df.infer_objects()
dtypes = {}
for c in df.columns:
try:
df[c] = pd.to_numeric(df[c])
dtypes[str(df[c].dtype)] = dtypes.get(str(df[c].dtype), 0) + 1
except:
pass
logger.info('df info: {}'.format(dtypes))
logger.info('df infer_objects/to_numeric size: {}'.format(df.shape))
dir_path = os.path.dirname(os.path.realpath(filename))
final_files_dir = os.path.join(dir_path, s3_bucket)
final_files_dir = os.path.join(final_files_dir, stream)
insert_id_count = len(df[record_unique_field].unique())
logger.info('df size: {}, record_unique_field count: {}'.format(df.shape, insert_id_count))
logger.info('final_files_dir: {}'.format(final_files_dir))
df['idx_day'] = pd.DatetimeIndex( | pd.to_datetime(df[field_to_partition_by_time]) | pandas.to_datetime |
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined length of the Series exceeds 10, 5, 3 or 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
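# Ceiling division: the largest block size on each side when df1 is split
# into n_blocks[0] (left) and n_blocks[1] (right) roughly equal blocks.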
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
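# assertFalse here: with the symmetrization step mocked out above, full
# symmetry should NOT hold (contrast with the assertTrue in the next test).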
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(['aaa'])
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(['aaa'])
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(['aaa'])
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_get_matches_two_dataframes(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
"""
Biblyser (c) is a bibliometric workflow for evaluating the bib metrics of an
individual or a group of people (an organisation).
Biblyser is licensed under a MIT License.
You should have received a copy of the license along with this work. If not,
see <https://choosealicense.com/licenses/mit/>.
"""
import sys
import numpy as np
import pandas as pd
from collections import Counter
from textwrap import wrap
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
#sys.path.append('../')
from biblyser.bibcollection import getGenderDistrib, countByYear
#Import organisation from csv
org_df = pd.read_csv('output/out_organisation.csv')
#Import bibcollection from csv
df = pd.read_csv('output/out_bibs.csv')
#Convert column items to appropriate objects
df['org_led'] = df['org_led'].astype('bool')
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S')
df['year'] = df['date'].dt.year
#Define bins
bin10=[0, 1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99, 100]
bin25=[0, 1, 25, 50, 75, 99, 100]
#Define bin labels
l10 = ['0', '1-10', '11-20', '21-30', '31-40', '41-50', '51-60', '61-70',
'71-80', '81-90', '91-99', '100']
l25 = ['0%', '1-25%', '26-50%', '51-75%', '76-99%', '100%']
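# Note: each label list has one fewer entry than its bin-edge list, as
# pd.cut-style binning expects (labels name the intervals between edges).
# Illustrative sketch only (not used below verbatim), e.g.:
# pd.cut(pd.Series([0, 5, 95, 100]), bins=bin10, labels=l10, include_lowest=True)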
#Set color palettes
cold = ['#337BA7','#08589e','#2b8cbe','#4eb3d3','#7bccc4','#a8ddb5', '#ccebc5']
warm = ['#C85353','#fed976','#feb24c','#fd8d3c','#fc4e2a','#e31a1c','#bd0026']
#------------------ Fetch and plot general publication stats ----------------
#Get count from first author publications
first = df.loc[df['org_led']==True]
first_yr = countByYear(first)
#Get count from co-author publications
coauthor = df.loc[df['org_led']==False]
co_yr = countByYear(coauthor)
#Merge and rename columns
co_yr['First author'] = first_yr['count']
all_yr = co_yr.rename(columns={'count' : 'Co-author'})
#Group journals
journals = df['journal'].groupby(df['journal']).agg({'count'}).sort_values(by=['count'],
ascending=True)
j10 = journals.tail(10)
others = len(list(journals['count'][:-10]))
#Affiliations of authorship
affiliations = list(df['affiliations'])
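# Flatten the comma-separated affiliation strings and count how often each
# affiliation appears across all publications.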
out1=[]
for a in affiliations:
allc = a.split(', ')
[out1.append(a) for a in allc]
out1 = Counter(out1).most_common()
aff_keys10 = [o[0] for o in out1[1:11]]
aff_vals10 = [o[1] for o in out1[1:11]]
aff_keys10.append('Others')
aff_vals10.append(sum(o[1] for o in out1[11:]))  # total mentions of all remaining affiliations
#Countries of authorship
countries = list(df['countries'])
out=[]
for c in countries:
allc = c.split(', ')
[out.append(a) for a in allc]
out = Counter(out).most_common()
co_keys10 = [o[0] for o in out[0:10]]
co_vals10 = [o[1] for o in out[0:10]]
co_keys10.append('Others')
co_vals10.append(sum(o[1] for o in out[10:]))  # total mentions of all remaining countries
#Prime subplots
fig1, ax1 = plt.subplots(1, 1, figsize=(10,10))
fig1.tight_layout(pad=4, h_pad=8, w_pad=0)
ax2 = ax1.inset_axes([0.2,0.47,0.3,0.2]) #Journals bar plt
ax3 = ax1.inset_axes([0.21,0.75,0.2,0.2]) #Pie #1
ax4 = ax1.inset_axes([0.54,0.75,0.2,0.2]) #Pie #2
#Set font styles and colour palettes
fontname='Arial'
title = 18
lfont1 = 14
lfont2 = 10
lfont3 = 7
tfont = {'fontsize':10, 'color':'#5D5D5D'}
bar_col = ['#0C7BDC','#FFC20A', '#CA4646']
pie_col = ['#332288','#88CCEE','#44AA99','#117733','#999933','#DDCC77',
'#CC6677','#882255','#AA4499','#6F3866','#DDDDDD']
#Plot year vs. authorships
all_yr.plot(kind='bar', stacked=False, color=[bar_col[0],bar_col[1]], ax=ax1)
ax1.set(ylim=(0,40), yticks=[0,10,20,30,40], xlabel='Date',
ylabel='Number of publications')
#Alter plot aesthetics
ax1.tick_params(axis='both', labelsize=tfont['fontsize'], labelcolor=tfont['color'])
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
#Set annotations
ax1.set_ylabel('Number of publications', labelpad=10, fontsize=lfont1)
ax1.set_xlabel('Date', labelpad=10, fontsize=lfont1)
ax1.legend(loc=4, fontsize=lfont2, framealpha=1,
title='Publications by year')
#Plot popular journals
ax2.barh([l*2 for l in np.arange(10)], list(j10['count']), color=bar_col[2])
ax2.set_yticks([l*2 for l in np.arange(10)])
labels = [ '\n'.join(wrap(l, 30)) for l in list(j10.index)]
ax2.set_yticklabels(labels, fontsize=lfont3)
ax2.tick_params(axis='x', labelsize=8, labelcolor=tfont['color'])
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.text(20, 0.5, f'Number of other journals: {others}', fontsize=lfont3)
#Plot top collaborator affiliations
p3,t3 = ax3.pie(aff_vals10, startangle=90, colors=pie_col,
wedgeprops={"edgecolor":"w",'linewidth':1})
legend_labels = [ '\n'.join(wrap(l, 30)) for l in aff_keys10]
ax3.legend(legend_labels, loc='center left', bbox_to_anchor=(-1.0, 0.5),
fontsize=lfont3)
#Plot top collaborator countries
p4,t4 = ax4.pie(co_vals10, startangle=90, colors=pie_col,
wedgeprops={"edgecolor":"w",'linewidth':1})
legend_labels = [ '\n'.join(wrap(l, 15)) for l in co_keys10]
ax4.legend(legend_labels, loc='center left', bbox_to_anchor=(-0.6, 0.5),
fontsize=lfont3)
#Plot summary table
ax4 = ax1.inset_axes([-0.02,0.38,0.4,0.1])
cells = [['Total publications', str(len(df.index))],
['Organisation-led publications', str(len(first.index))],
['Co-authored publications', str(len(coauthor.index))],
['Average citation count', str(int(np.nanmean(list(df['citations']))))],
['Average altmetrics', str(int(np.nanmean(list(df['altmetric']))))]]
table = ax4.table(cellText=cells, colWidths=[0.6,0.2], edges='horizontal',
cellLoc='left')
ax4.axis("off")
table.scale(1, 1.25)
table.auto_set_font_size(False)
table.set_fontsize(lfont3)
#Set annotations
plt.title('GEUS publications', fontsize=title)
ax1.text(1, 39.7, 'Top collaborators', fontsize=lfont1)
ax1.text(1, 27.2, 'Top journals', fontsize=lfont1)
ax1.text(1, 15.8, 'Summary statistics', fontsize=lfont1)
#Plot and save
plt.rcParams["font.family"] = fontname
plt.savefig('output/publication_stats.jpg', dpi=300)
plt.show()
# plt.close()
#------------ Publication lead and co-authorship by gender ----------------
#Set font styles
hfont = {'fontname':'Arial', 'fontsize':16}#, 'fontweight': 'bold'}
lfont1 = {'fontname':'Arial', 'fontsize':12, 'color':'#5D5D5D'}
tfont = {'fontname':'Arial', 'fontsize':8, 'color':'#5D5D5D'}
#Get org gender from org papers
df1 = pd.DataFrame()
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
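Examples
--------
Illustrative sketch only (the DataFrame here is arbitrary):
>>> df = pd.DataFrame({'a': [1, 2, 3]})
>>> result = round_trip_pickle(df)  # doctest: +SKIP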
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
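Examples
--------
Illustrative sketch only (assumes an arbitrary DataFrame ``df``):
>>> result = round_trip_pathlib(df.to_csv, pd.read_csv)  # doctest: +SKIP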
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
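Examples
--------
Illustrative sketch only (assumes a gzip-compressed file exists at the path):
>>> with decompress_file('data.csv.gz', compression='gzip') as f:  # doctest: +SKIP
...     raw = f.read()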
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
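Examples
--------
Illustrative sketch only:
>>> assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))  # doctest: +SKIP
>>> assert_almost_equal(pd.Series([0.1, 0.2]), pd.Series([0.1, 0.2]))  # doctest: +SKIP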
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
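Examples
--------
Illustrative sketch only (results depend on the locales installed):
>>> get_locales(prefix='en')  # doctest: +SKIP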
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
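Examples
--------
Illustrative sketch only (availability of this locale depends on the system):
>>> with set_locale('en_US.UTF-8') as loc:  # doctest: +SKIP
...     pass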
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
locale.Error):  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
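Examples
--------
Illustrative sketch only:
>>> with ensure_clean('.csv') as path:  # doctest: +SKIP
...     pd.DataFrame({'a': [1]}).to_csv(path)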
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
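Examples
--------
Illustrative sketch only:
>>> assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))  # doctest: +SKIP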
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison to produce friendlier, per-level error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
    assert_index_equal(left.right, right.right, exact=exact,
                       obj='{obj}.right'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check the dtype if both left and right are np.ndarray.
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
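# Hedged usage sketch (illustrative, not part of the original file): check_same
# compares the underlying buffers via .base, so a view of an array counts as
# 'same' memory while an explicit copy does not, e.g.
#
#   arr = np.array([1, 2, 3])
#   assert_numpy_array_equal(arr, arr.view(), check_same='same')   # passes
#   assert_numpy_array_equal(arr, arr.copy(), check_same='copy')   # passes
#   assert_numpy_array_equal(arr, arr.copy(), check_same='same')   # raises AssertionError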
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
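# Illustrative sketch (assumed usage): the NA masks are compared first and the
# remaining valid values are compared as object dtype, e.g. with a nullable
# integer array
#
#   ea1 = pd.array([1, 2, None], dtype='Int64')
#   ea2 = pd.array([1, 2, None], dtype='Int64')
#   assert_extension_array_equal(ea1, ea2)   # passes: same NA mask, same valid values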
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
        # TODO: some tests compare a sparse right-hand side against a dense
        # left-hand side; switch to assert_class_equal once those are updated.
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and | is_categorical_dtype(right) | pandas.core.dtypes.common.is_categorical_dtype |
import pandas as pd
import numpy as np
from upload_data import *
import plotly.express as px
# How many Pokémon per generation?
print('----- Qtde Pokémon x Geração -----')
gen_df = pokemon_df.groupby('gen').count()
data_gen = gen_df['national_number']
data_gen = pd.DataFrame(data_gen)
print(data_gen)
print('----------------------------------')
print(pokemon_df.shape)
fig = px.bar(data_gen,
text_auto=True,
title='Qtde Pokémon x Geração'
)
fig.show()
# How many Pokémon per primary type?
print('----- Qtde Pokémon x Tipo Primário -----')
gen_df = pokemon_df.groupby('primary_type').count()
data_gen = gen_df['national_number']
data_gen = | pd.DataFrame(data_gen) | pandas.DataFrame |
import unittest
from parameterized import parameterized
import pandas
import numpy
from pdrle import decode
class TestDecode(unittest.TestCase):
@parameterized.expand([
[pandas.Series(["a", "a", "b", "b", "b", "a", "a", "c"]),
pandas.DataFrame({"vals": ["a", "b", "a", "c"],
"runs": [2, 3, 2, 1]})],
[ | pandas.Series([1, 1, 1, 1, 1, 1, 1]) | pandas.Series |
from . import pyheclib
import pandas as pd
import numpy as np
import os
import time
import warnings
# some static functions
def set_message_level(level):
"""
set the verbosity level of the HEC-DSS library
level ranges from "bort" only (level 0) to "internal" (level >10)
"""
pyheclib.hec_zset('MLEVEL','',level)
def set_program_name(program_name):
"""
    sets the name of the program (up to 6 characters) to store with the data
"""
name=program_name[:min(6,len(program_name))]
pyheclib.hec_zset('PROGRAM',name,0)
def get_version(fname):
"""
    Get the version of a DSS file.
    Returns a tuple of the 4-character string version and the integer version.
"""
return pyheclib.hec_zfver(fname);
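# Usage sketch for the module-level helpers above (illustrative; the file name
# 'sample.dss' is an assumption):
#
#   set_program_name('PYDSS')      # truncated to at most 6 characters
#   set_message_level(2)           # fairly quiet library output
#   version_string, version_int = get_version('sample.dss')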
class DSSFile:
#DSS missing conventions
MISSING_VALUE=-901.0
MISSING_RECORD=-902.0
FREQ_EPART_MAP = {
pd.tseries.offsets.Minute(n=1):"1MIN",
pd.tseries.offsets.Minute(n=2):"2MIN",
pd.tseries.offsets.Minute(n=3):"3MIN",
| pd.tseries.offsets.Minute(n=4) | pandas.tseries.offsets.Minute |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 13:57:33 2020
@author: <NAME>
"""
# A problem with ARIMA is that it does not support seasonal data, that is, a time series with a repeating cycle.
# ARIMA expects data that is either not seasonal or has had the seasonal component removed, e.g. seasonally adjusted via methods such as seasonal differencing.
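# Illustrative sketch of the seasonal adjustment mentioned above (assumes a
# pandas Series `y` with a yearly cycle at monthly frequency; not part of the
# original script): subtracting the observation from one season earlier removes
# the repeating component before an ARIMA-type model is fit.
#
#   y_deseasonalised = y.diff(12).dropna()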
from timeseries.modules.config import ORIG_DATA_PATH, SAVE_PLOTS_PATH, SAVE_MODELS_PATH, \
DATA, MONTH_DATA_PATH, MODELS_PATH, SAVE_RESULTS_PATH, SAVE_PLOTS_RESULTS_PATH_BASE
from timeseries.modules.dummy_plots_for_theory import save_fig, set_working_directory
from timeseries.modules.load_transform_data import load_transform_excel
# from timeseries.modules.sophisticated_prediction import create_dict_from_monthly
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm
import warnings
import time
import glob
import os
import datetime
from statsmodels.tsa.stattools import adfuller # dickey fuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.arima.model import ARIMA, ARIMAResults
from statsmodels.tsa.statespace.sarimax import SARIMAX, SARIMAXResults
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from sklearn.metrics import mean_squared_error
def input_ar_ma(model_name):
    print('\nPlease insert the AR and MA orders, which you can read from the PACF and ACF plots.\nType <stop> to close the input.')
while True:
try:
nr = input('AR-order (PACF)\tMA-order (ACF):\n')
if 'stop' in nr:
break
nr1, nr2 = nr.split()
nr1, nr2 = int(nr1), int(nr2)
if model_name in ['SARIMAX']:
try:
nr_sari = input('Seasonal\nAR-order (PACF)\tMA-order (ACF)\tSeasonality:\n')
if 'stop' in nr_sari:
break
nr3, nr4, nr5 = nr_sari.split()
nr3, nr4, nr5 = int(nr3), int(nr4), int(nr5)
return {'AR':nr1, 'MA':nr2, 'SAR':nr3, 'SMA':nr4, 'S':nr5}
break
except ValueError:
print('\nYou did not provide three numbers.\nPlease insert three numbers and no Strings!\nFormat: <nr> <nr> <nr>')
else:
return {'AR':nr1, 'MA':nr2}
break
except ValueError:
print('\nYou did not provide two numbers.\nPlease insert two numbers and no Strings!\nFormat: <nr> <nr>')
def get_stationarity(timeseries, given_model, window, save_plots, print_results = False):
# rolling statistics
rolling_mean = timeseries.rolling(window=window).mean()
rolling_std = timeseries.rolling(window=window).std()
# rolling statistics plot
original = plt.plot(timeseries, color='blue', label='Original')
mean = plt.plot(rolling_mean, color='red', label='Rolling Mean')
std = plt.plot(rolling_std, color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation for windowsize ' + str(window) )
if save_plots:
save_fig(name = given_model + '_mean_deviation_window_' + str(window), path_img=SAVE_PLOTS_PATH)
plt.show(block = False)
# Dickey–Fuller test:
result = adfuller(timeseries)
stationary = False
if result[1] <= 0.05:
stationary = True
result = {'ADF Statistic':result[0], 'p-value':result[1], 'other_first': result[2],
'other_second':result[3], 'Critical Values':{'1%':result[4]['1%'],
'5%':result[4]['5%'],
'10%':result[4]['10%']},
'stationary': stationary}
if print_results:
print('ADF Statistic: {}'.format(result['ADF Statistic']))
print('p-value: {}'.format(result['p-value']))
print('Critical Values:')
for key, value in result['Critical Values'].items():
print('\t{}: {}'.format(key, value))
print('Stationary: {}'.format(stationary))
return result
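# Hedged usage note (illustrative): the returned dict carries the ADF statistic,
# p-value, critical values and a boolean 'stationary' flag, so a caller can
# difference until the test passes, e.g.
#
#   res = get_stationarity(series, given_model='ARIMA', window=7, save_plots=False)
#   if not res['stationary']:
#       series = series.diff(7).dropna()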
def plot_acf_pacf(series, given_model, save_plots, rolling_window):
if len(series.values)*0.5 >= 60: # big value for seasonal AR and MA detection
lags_length = 60
else:
lags_length = len(series.values)*0.5 - 5 # -5 because otherwise lags_length to long to display
fig, (ax1,ax2) = plt.subplots(2,1, sharex=True)
fig = plot_pacf(series, zero = True, lags = lags_length, method = 'OLS', title ='Partial correlogram to find out AR value for '+ given_model +' window ' + str(rolling_window), ax = ax1, markerfacecolor='black', color = 'black')
fig = plot_acf(series, zero = True, lags = lags_length, title ='Correlogram to find out MA value for ' + given_model +' window ' + str(rolling_window), ax = ax2, markerfacecolor='black', color = 'black')
plt.show(block= False)
if save_plots:
save_fig(name = given_model + '_acf_pacf_' + str(rolling_window), path_img=SAVE_PLOTS_PATH, fig = fig)
def decompose_and_plot(dependent_var, given_model, index, rolling_window, save_dec, save_acf, save_stat, print_results = False):
series = pd.Series(dependent_var.values, index=index)
decomposition = seasonal_decompose(series)
decomposition.resid.dropna(inplace=True)
figure = decomposition.plot()
if save_dec:
save_fig(name = 'decompose_window_' + str(rolling_window), path_img=SAVE_PLOTS_PATH, fig = figure)
plot_acf_pacf(series, given_model, save_acf, rolling_window)
plt.figure(2)
dickey_fuller_results = get_stationarity(decomposition.observed, given_model = given_model, window = rolling_window, save_plots = save_stat, print_results = print_results)
return decomposition
def split(data, diff_faktor, rel_faktor):
values = pd.DataFrame(data.values)
dataframe = None
if diff_faktor != 0:
for i in range(1,diff_faktor+1):
dataframe = pd.concat([dataframe,values.shift(i)], axis= 1)
dataframe = pd.concat([dataframe, values], axis = 1)
else:
dataframe = values
X = dataframe.values
train, test = X[1:int(len(X)*rel_faktor)], X[int(len(X)*rel_faktor):]
if diff_faktor != 0:
train_X, train_y = train[:,:diff_faktor], train[:,diff_faktor]
test_X, test_y = test[:,:diff_faktor], test[:,diff_faktor]
else:
train_X, train_y = None, train
test_X, test_y = None, test
    return {'Train':train, 'Test':test, 'Train_X':train_X , 'Train_y':train_y , 'Test_X': test_X, 'Test_y':test_y}
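# Small worked sketch (illustrative) of the lag construction above: with
# diff_faktor=2 and values [1, 2, 3, 4], the frame holds [shift(1), shift(2),
# original], so its last row is (3, 2, 4); Train_X/Test_X are the lag columns
# and Train_y/Test_y the unshifted target column.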
def compare_models(given_model, data, diff_faktor, rolling_window, forecast_one_step = False):
if forecast_one_step:
name_supl = '_one_step'
else:
name_supl = ''
print('\n', given_model, ' Model started:')
subresults = pd.DataFrame(columns = ['Predicted', 'Expected'])
result = {'Used Model':None, 'Model':None, 'MSE':None, 'RMSE':None, 'Orders':None}
decomposition = decompose_and_plot(dependent_var=data, given_model = given_model + name_supl, index=data.index, rolling_window=rolling_window, save_dec = True, save_acf=True, save_stat=True, print_results = True)
if given_model in ['persistance','SARIMAX']:
splitted_data = split(data = decomposition.observed, diff_faktor= 0, rel_faktor = 0.9 )
if given_model == 'SARIMAX':
order_dict = input_ar_ma(given_model)
else:
order_dict = None
elif given_model in ['ARIMA', 'ARMA']:
diff_df = decomposition.observed.diff(diff_faktor)
diff_df.dropna(inplace=True)
print('\nAfter differencing:')
        get_stationarity(diff_df, given_model = given_model + name_supl, window= rolling_window, save_plots=True, print_results = True) # prove the data is now stationary
plot_acf_pacf(diff_df,given_model = given_model + name_supl, save_plots=True, rolling_window=diff_faktor)
splitted_data = split(data = diff_df, diff_faktor= 0, rel_faktor = 0.9 )
order_dict = input_ar_ma(given_model)
result['Orders'] = order_dict
history = [x for x in splitted_data['Train']]
predictions = list()
if forecast_one_step:
test_length = 1
splited_length = len(splitted_data['Test'])
else:
test_length = len(splitted_data['Test'])
splited_length = 1
for i in tqdm(range(splited_length)):
# predict
warnings.filterwarnings('ignore')
if given_model == 'persistance':
model_fit = None
yhat = history[-test_length:]
elif given_model == 'ARIMA':
model_fit = ARIMA(history, order=(order_dict['AR'],1,order_dict['MA'])).fit()
yhat = model_fit.forecast(test_length)
elif given_model == 'ARMA':
model_fit = ARMA(history, order=(order_dict['AR'],order_dict['MA'])).fit(disp=0)
yhat = model_fit.forecast(test_length)[0]
elif given_model == 'SARIMAX':
model_fit = SARIMAX(history, order = (order_dict['AR'],1,order_dict['MA']),
seasonal_order=(order_dict['SAR'],1,order_dict['SMA'],order_dict['S']),
enforce_stationarity=True, enforce_invertibility = True).fit(disp = 0)
yhat = model_fit.forecast(test_length)
predictions.append(yhat)
if forecast_one_step:
# observation
obs = splitted_data['Test'][i]
history.append(obs)
subresults.loc[i] = [yhat,obs]
else:
obs = splitted_data['Test']
obs = [x for x in obs]
subresults = [yhat,obs]
# print('>Predicted={}, Expected={}'.format(yhat, obs))
result['Model'] = model_fit
result['Used Model'] = given_model
if given_model == 'persistance':
predictions = sum(predictions, [])
else:
if not forecast_one_step:
predictions = predictions[0]
result['MSE'] = np.round(mean_squared_error(splitted_data['Test'][:,0], predictions, squared = True),2)
result['RMSE'] = np.round(mean_squared_error(splitted_data['Test'][:,0], predictions, squared = False),2)
print('RMSE: %.3f' % result['RMSE'])
warnings.filterwarnings('default')
return [result, subresults]
def monthly_aggregate(data_frame, combined):
if not combined:
try:
data_frame.index = data_frame['Verkaufsdatum']
data_frame = data_frame.loc[:,data_frame.columns != 'Verkaufsdatum']
except:
data_frame.index = data_frame['date']
data_frame = data_frame.loc[:,data_frame.columns != 'date']
result_df = pd.DataFrame(index = set(data_frame.index.to_period('M')), columns = data_frame.columns)
for year_month in tqdm(set(result_df.index)):
result_df.loc[year_month] = data_frame.loc[data_frame.index.to_period('M') == year_month].sum()
result_df.index.name = 'date'
return result_df.sort_index()
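# Design note (hedged equivalent, not part of the original): the per-period loop
# above can usually be expressed as a single groupby over the monthly period,
# which avoids building the result frame row by row:
#
#   monthly = data_frame.groupby(data_frame.index.to_period('M')).sum().sort_index()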
def combine_dataframe(data_frame_with_all_data, monthly= False, output_print = False):
head_names = ['Einzel Menge in ST', '4Fahrt Menge in ST', 'Tages Menge in ST', 'Gesamt Menge in ST']
df1 = data_frame_with_all_data[0]
df1.index = df1['Verkaufsdatum']
result_df = pd.DataFrame(columns = head_names)
result_df[list(set(head_names) - set(['Tages Menge in ST']))] = df1[list(set(head_names) - set(['Tages Menge in ST']))]
result_df['Tages Menge in ST'] = np.zeros(result_df.shape[0], dtype = int)
for df in tqdm(data_frame_with_all_data[1:]):
df.index = df['Verkaufsdatum']
temp_df = df
for time_stamp in set(df1.index):
if time_stamp not in set(df.index):
dic = {dict_key : 0 for dict_key in df.columns[1:]}
dic['Verkaufsdatum'] = time_stamp
temp_df = temp_df.append(dic, ignore_index = True)
temp_df.index = temp_df['Verkaufsdatum']
for name in head_names:
try:
result_df[name] = result_df[name] + temp_df[name]
except:
if output_print:
print('This header is not present in temp "{}" \n'.format(name))
pass
# insert new column
result_df['Gesamt Menge in ST calc'] = result_df[['Einzel Menge in ST', '4Fahrt Menge in ST', 'Tages Menge in ST']].sum(axis = 1)
if monthly:
print('\nMonthly data aggregation:')
time.sleep(0.3)
monthly_df = monthly_aggregate(result_df, combined = True)
return monthly_df, result_df
return result_df
def create_dict_from_monthly(monthly_given_list, monthly_names_given_list, agg_monthly_list,
agg_monthly_names_list, combined = False):
monthly_given_dict = {name:data for name, data in zip(monthly_names_given_list, monthly_given_list)}
agg_monthly_dict = {name:data for name, data in zip(agg_monthly_names_list,agg_monthly_list)}
monthly_dict_copy = {}
for dic in tqdm(agg_monthly_dict):
for dic1 in agg_monthly_dict:
if dic != dic1 and dic.split('_')[1] == dic1.split('_')[1]:
used_columns = remove_unimportant_columns(agg_monthly_dict[dic].columns, ['Verkaufsdatum','Tages Wert in EUR','Einzel Wert in EUR','4Fahrt Wert in EUR', 'Gesamt Wert in EUR'])
used_columns1 = remove_unimportant_columns(agg_monthly_dict[dic1].columns, ['Verkaufsdatum','Tages Wert in EUR','Einzel Wert in EUR','4Fahrt Wert in EUR', 'Gesamt Wert in EUR'])
temp = agg_monthly_dict[dic][used_columns].merge(agg_monthly_dict[dic1][used_columns1], left_index = True, right_index = True)
temp['Gesamt Menge in ST'] = temp[['Gesamt Menge in ST_x','Gesamt Menge in ST_y']].sum(axis=1)
monthly_dict_copy[dic.split('_')[1]] = temp.drop(['Gesamt Menge in ST_x','Gesamt Menge in ST_y'], axis = 1)
lis = list()
for nr,column in enumerate(monthly_dict_copy[dic.split('_')[1]].columns):
lis.append(column.split()[0])
monthly_dict_copy[dic.split('_')[1]].columns = lis
final_dict = {}
for monthly_name, monthly_data in tqdm(monthly_given_dict.items()):
einzel = monthly_data[(monthly_data['PGR'] == 200)]
fahrt4 = einzel[einzel[einzel.columns[1]].str.contains('4-Fahrten|4 Fahrten', regex=True)]
einzel = einzel[einzel[einzel.columns[1]].str.contains('4-Fahrten|4 Fahrten', regex=True) == False]
tages = monthly_data[(monthly_data['PGR'] == 300)]
final_df = pd.DataFrame([tages.sum(axis=0, numeric_only = True)[2:],
einzel.sum(axis=0, numeric_only = True)[2:],
fahrt4.sum(axis=0, numeric_only = True)[2:]],
index=['Tages', 'Einzel', '4Fahrt'])
final_df = final_df.T
las = list()
for year_month in final_df.index:
las.append(datetime.datetime.strptime(year_month, '%Y%m'))
final_df.index = las
final_df.index = final_df.index.to_period('M')
final_df['Gesamt'] = final_df.sum(axis = 1)
final_dict[monthly_name] = pd.concat([final_df, monthly_dict_copy[monthly_name].loc[
pd.Period(max(final_df.index)+1):, : ]])
if combined:
tages = list()
einzel = list()
fahrt_4 = list()
gesamt = list()
final = pd.DataFrame()
for key in final_dict.keys():
tages.append( final_dict[key][final_dict[key].columns[0]])
einzel.append( final_dict[key][final_dict[key].columns[1]])
fahrt_4.append( final_dict[key][final_dict[key].columns[2]])
gesamt.append( final_dict[key][final_dict[key].columns[3]])
final['Tages'] = pd.DataFrame(tages).sum(axis = 0)
final['Einzel'] = pd.DataFrame(einzel).sum(axis = 0)
final['4Fahrt'] = pd.DataFrame(fahrt_4).sum(axis = 0)
final['Gesamt'] = pd.DataFrame(gesamt).sum(axis = 0)
final_dict['combined'] = final
return final_dict
def print_best_result(result_list):
if type(result_list ) == dict:
print('\nBest model {} with RMSE: {}'.format(result_list['Used Model'], result_list['RMSE']))
return result_list
else:
for nr, res in enumerate(result_list):
if nr == 0:
min_temp = res['RMSE']
temp_nr = nr
elif res['RMSE'] < min_temp:
temp_nr = nr
min_temp = res['RMSE']
print('\nBest model {} with RMSE: {}'.format(result_list[temp_nr]['Used Model'], result_list[temp_nr]['RMSE']))
return result_list[temp_nr]
def remove_unimportant_columns(all_columns, column_list):
result_columns = set(all_columns)
for column in column_list:
try:
result_columns -= set([column])
except:
continue
return result_columns
def predict_with_given_model(model, model_name, data, trained_column, used_column, data_name):
print('\n', model_name, ' Model started:')
if model_name in ['persistance','SARIMAX']:
splitted_data = split(data = data, diff_faktor= 0, rel_faktor = 0.9 )
elif model_name in ['ARIMA', 'ARMA']:
        # difference the passed-in series directly (7-day differencing, matching
        # the diff_faktor used in compare_models); this function performs no
        # seasonal decomposition of its own
        diff_df = data.diff(7)
        diff_df.dropna(inplace=True)
splitted_data = split(data = diff_df, diff_faktor= 0, rel_faktor = 0.9 )
result = {'Used Model':model_name, 'Trained column':trained_column,
'RMSE':None, 'Predicted column':used_column, 'Pred DataFrame':data_name}
test_length = len(splitted_data['Test'])
splited_length = 1
for i in tqdm(range(splited_length)):
# predict
warnings.filterwarnings('ignore')
yhat = model.predict(1,test_length)
obs = splitted_data['Test']
res = np.concatenate((yhat.reshape(-1,1),obs), axis = 1)
subresults = pd.DataFrame(res, columns = ['Predicted', 'Expected'])
result['RMSE'] = np.round(mean_squared_error(obs, yhat, squared = False),2)
print('RMSE: %.3f' % result['RMSE'])
warnings.filterwarnings('default')
final_result = pd.DataFrame.from_dict(result, orient = 'index').T
return final_result, subresults
if __name__ == '__main__':
set_working_directory()
monthly_given_list = load_transform_excel(MONTH_DATA_PATH)
monthly_names_given_list = ['aut', 'eigVkSt', 'privat', 'app']
agg_monthly_names_list = ['einz_aut', 'einz_eigVkSt', 'einz_privat', 'einz_bus', 'einz_app',
'tages_aut', 'tages_eigVkSt', 'tages_privat', 'tages_bus', 'tages_app']
try:
ran
except:
data_frame = load_transform_excel(ORIG_DATA_PATH)
combined_m_df, combined_df = combine_dataframe(data_frame, monthly = True)
monthly_list = list()
for df in data_frame:
monthly_list .append(monthly_aggregate(df, combined = True))
monthly_dict = create_dict_from_monthly(monthly_given_list, monthly_names_given_list, monthly_list, agg_monthly_names_list)
ran = True
one_step = False
predict_pretrained = True
# data_list = monthly_dict.values()
# data_names_list = monthly_dict.keys()
data_list = data_frame[:]
data_list.append(combined_df)
data_names_list = ['df_0_einzel_aut', 'df_1_einzel_eigVkSt', 'df_2_einzel_privat',
'df_3_einzel_bus', 'df_4_einzel_app', 'df_5_tages_aut',
'df_6_tages_eigVkSt', 'df_7_tages_privat', 'df_8_tages_bus',
'df_9_tages_app', 'combined_df']
data_names_list = data_names_list [10:]
data_list = data_list[10:]
data_to_use = combined_df
used_column = combined_df.columns[0]
# models = ['persistance', 'ARMA', 'ARIMA', 'SARIMAX']
models = ['SARIMAX']
rolling_window = 7
diff_faktor = 7
Path_to_models = os.path.join(MODELS_PATH, 'more_steps/Einzel/', DATA , '')
# used_column = 'Einzel'
if not predict_pretrained:
result_list = list()
for model in models:
temp, pred = compare_models(given_model = model , data = data_to_use[used_column],
diff_faktor = diff_faktor, rolling_window = rolling_window, forecast_one_step = one_step)
            temp['name'] = data_names_list[0]  # 'combined_df', the frame assigned to data_to_use above
temp['used_column'] = used_column
result_list.append(temp)
# if model != 'persistance':
# temp['Model'].save(SAVE_MODELS_PATH + model + '_' + DATA +'.pkl')
best_model = print_best_result(result_list)
plt.plot(pred[1], label = 'orig')
plt.plot(pred[0], label = 'pred')
# plt.plot(pred[pred.columns[1]], label = pred.columns[1])
# plt.plot(pred[pred.columns[0]], label = pred.columns[0])
plt.title(model + ' Plot of predicted results with RMSE: ' + str(temp['RMSE']))
plt.legend(loc='best')
save_fig('sarimax_results_plot', SAVE_PLOTS_RESULTS_PATH_BASE)
# result = pd.DataFrame(result_list)
# result.loc[result.shape[0]] = best_model
# result.to_csv(SAVE_MODELS_PATH + 'Baseline_results.csv')
# train_size = int(data_to_use.shape[0]*0.9)
# if best_model['Used Model'] == 'ARMA':
# best_model['Model'].plot_predict(train_size-30, train_size+20)
else:
final_res = | pd.DataFrame(columns = ['Used Model', 'Trained column', 'RMSE', 'Predicted column', 'Pred DataFrame']) | pandas.DataFrame |
import sys
import pandas as pd
from sqlalchemy import *
def load_data(messages_filepath, categories_filepath):
'''
    Load the message and category datasets from their csv files, convert
    them to pandas DataFrames, and merge the two frames.
Argument :
messages_filepath - path of the csv file disaster_messages.csv
categories_filepath - path of the csv file disaster_categories.csv
return :
df - uncleaned data frame
'''
# load messages and categories datasets
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
# merge datasets
df = | pd.merge(messages,categories, on="id") | pandas.merge |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
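  # Design note (hedged alternative, behaviour unchanged): the hand-rolled
  # day/month/leap-year counters above build a daily index; with pandas the
  # same index is normally obtained via
  #
  #   start = pd.Timestamp(year=start_year, month=start_month, day=start_day)
  #   datetime_index = pd.date_range(start=start, periods=len(df_data), freq='D')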
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedence Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
    self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', 'Don Pedro', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
    self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', 'Don Pedro', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
    self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
    self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
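    # Note on indexing in the accounting loops below: monthly totals are stored on a
    # water-year axis, position = year_num*12 + month_num - 10, so October-December of
    # calendar year (starting_year + year_num) fill positions year_num*12 .. year_num*12 + 2,
    # while January-September of that calendar year belong to the previous water year's block.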
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 29, 31, 30, 31]
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
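          # The output series hold cumulative totals that reset each water year, so the
          # monthly amount below is the month-end value minus the prior month-end value,
          # except in October (month_num == 10), which starts the water year and is taken directly.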
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
						#attribute in-leiu deliveries for irrigation to district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
						#attribute in-leiu recharge to district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
						#if classifying by physical location, attribute to district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
					##Pumping for in-leiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
						#if classifying by physical location, attribute to district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
			##Pumping (daily values accumulated into monthly totals)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
#Get values for any private entities within the district
for private_name in self.private_list:
private = private_name.name
if district.key in self.private_districts[private]:
inleiu_name = private + '_' + district.key + '_inleiu_irrigation'
					inleiu_recharge_name = private + '_' + district.key + '_inleiu_recharge'
direct_recover_name = private + '_' + district.key + '_recover_banked'
indirect_surface_name = private + '_' + district.key + '_exchanged_SW'
indirect_ground_name = private + '_' + district.key + '_exchanged_GW'
inleiu_pumping_name = private + '_' + district.key + '_leiupumping'
pumping_name = private + '_' + district.key + '_pumping'
recharge_name = private + '_' + district.key + '_' + district.key + '_recharged'
for year_num in range(0, self.number_years - 1):
year_str = str(year_num + self.starting_year + 1)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years - 1:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year + 1)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
									#attribute in-leiu deliveries for irrigation to district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
									total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
'''
Detecting the best features among ~600 new features.
INPUT FILES:
Train(i).csv (The new features of train set; made by Arman)
Test(j).csv (the new features of test set; made by Arman)
OUTPUTS:
newFeatTrain.csv (a file that only has the most relevant features)
newFeatTest.csv (a file that only has the most relevant features)
__Authors__:
<NAME>, <NAME>
__Version__:
1.0
'''
import numpy as np
import pandas as pd
### Reading input data:
df_ts1 = pd.read_csv('../../homedepotdata/Test1.csv')
df_ts2 = pd.read_csv('../../homedepotdata/Test2.csv')
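# Hedged sketch (not part of the original script): one plausible way to pick the
# "most relevant" features described in the header is a simple correlation filter
# against the target. The target column name ('relevance') and the 0.05 threshold
# are illustrative assumptions, not taken from the original code.
df_tr1 = pd.read_csv('../../homedepotdata/Train1.csv')
feature_corr = df_tr1.corr()['relevance'].abs().sort_values(ascending=False)
keep_cols = feature_corr[feature_corr > 0.05].index.tolist()
df_tr1[keep_cols].to_csv('newFeatTrain.csv', index=False)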
"""
Test loading of nyc_taxis with dynamic queries.
"""
import time
import pandas as pd
import progressivis.core
from progressivis.core import Scheduler, Every
from progressivis.table import Table
from progressivis.vis import MCScatterPlot
from progressivis.io import CSVLoader
#from progressivis.datasets import get_dataset
from progressivis.table.constant import Constant
import asyncio as aio
def _filter(df):
lon = df['pickup_longitude']
lat = df['pickup_latitude']
return df[(lon > -74.08) & (lon < -73.5) & (lat > 40.55) & (lat < 41.00)]
def _print_len(x):
if x is not None:
print(len(x))
#log_level() #package='progressivis.stats.histogram2d')
try:
s = scheduler
except NameError:
s = Scheduler()
#PREFIX= 'https://storage.googleapis.com/tlc-trip-data/2015/'
#SUFFIX= ''
PREFIX = '../nyc-taxi/'
SUFFIX = '.bz2'
URLS = [
PREFIX+'yellow_tripdata_2015-01.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-02.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-03.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-04.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-05.csv'+SUFFIX,
PREFIX+'yellow_tripdata_2015-06.csv'+SUFFIX,
]
FILENAMES = pd.DataFrame({'filename': URLS})
# pandas
# creating a pandas series
# creating a series by passing a list of values and custom index labels.
import numpy as np
import pandas as pd
s = pd.Series([1, 2, 3, np.nan, 5, 6], index=['A', 'B', 'C', 'D', 'E', 'F'])
print(s)
# creating a pandas dataframe
data = {'Gender':['F', 'M', 'M'], 'Emp_ID': ['E01', 'E02', 'E03'],
'Age': [25, 27, 25]}
# we want the order the columns, so lets specify in columns parameter
df = pd.DataFrame(data, columns=['Emp_ID', 'Gender', 'Age'])
print(df)
# reading / writing data from csv, text, Excel
# df = pd.read_csv('mtcars.csv')
# df = pd.read_csv('mtcars.txt', sep='\t')
# df = pd.read_excel('mtcars.xlsx')
# reading for multiple sheets of same Excel into different dataframes
# xlsx = pd.ExcelFile('mtcars.xlsx')
# sheet1_df = pd.read_excel(xlsx, 'Sheet1')
# sheet2_df = pd.read_excel(xlsx, 'Sheet2')
# writing
# index = False parameter will not write the index values, default is True
# df.to_csv('newFile.csv', index=False)
# df.to_csv('newFile.txt', sep='\t', index=False)
# df.to_excel('newFile.xlsx', sheet_name='1', index=False)
# basic statistics on dataframe
# describe() returns count, mean, std, min, 25% (first quartile),
# 50% (median), 75% (third quartile), and max for each numeric column
df = pd.read_csv('iris.csv')
print(df.describe())
# cov() - Covariance indicates how two variables are related. A positive
# covariance means the variables are positively related, while a negative
# covariance means the variables are inversely related. Drawback of covariance
# is that it does not tell you the degree of positive or negative relation
# computing the covariance matrix of the dataframe
df = pd.read_csv('iris.csv')
print(df.cov())
# corr() - correlation tells you the degree to which the variables tend to
# move together; it is always a value between -1 and 1.
# computing the correlation matrix of the dataframe
df = pd.read_csv('iris.csv')
print(df.corr())
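# tiny illustration (added, not from the original tutorial): perfectly linearly
# related series have correlation +1, and inversely related series have -1
s1 = pd.Series([1, 2, 3, 4])
print(s1.corr(s1 * 2))   # 1.0
print(s1.corr(-s1))      # -1.0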
# merge / join
# concat or append operation
data = {
'emp_id': ['1', '2', '3', '4', '5'],
'first_name': ['Jason', 'Andy', 'Allen', 'Alice', 'Amy'],
'last_name': ['Larkin', 'Jacob', 'A', 'AA', 'Jackson']}
df_1 = pd.DataFrame(data, columns=['emp_id', 'first_name', 'last_name'])
data = {
'emp_id': ['4', '5', '6', '7'],
'first_name': ['Brian', 'Shize', 'Kim', 'Jose'],
'last_name': ['Alexander', 'Suma', 'Mike', 'G']}
df_2 = pd.DataFrame(data, columns=['emp_id', 'first_name', 'last_name'])
# using concat
df = pd.concat([df_1, df_2])
print(df)
# using append
print(df_1.append(df_2))
# join the two dataframes along columns
print(pd.concat([df_1, df_2], axis=1))
# merge two dataframes based on the emp_id value
# in this case only the emp_id's present in both table will be joined
print(pd.merge(df_1, df_2, on='emp_id'))
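# added note: pd.merge defaults to an inner join, so emp_ids missing from either
# frame are dropped; 'outer' or 'left' joins keep them instead
print(pd.merge(df_1, df_2, on='emp_id', how='outer'))
print(pd.merge(df_1, df_2, on='emp_id', how='left'))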
# Oct 21, 2019
# Kaggle challenge from https://www.kaggle.com/c/cat-in-the-dat
# handle each group of columns according to its data type
# accuracy: 0.8011 in test
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
catTrain = pd.read_csv("/kaggle/input/cat-in-the-dat/train.csv")
catTest = pd.read_csv("/kaggle/input/cat-in-the-dat/test.csv")
cat = pd.concat([catTrain, catTest], axis = 0, ignore_index = True, sort = False)
print(cat.shape) # number of rows and columns. (rows, cols)
print(cat.tail(5))
# see distribution of target
count_target1 = len(catTrain[catTrain["target"] == 1])
count_target0 = len(catTrain[catTrain["target"] == 0])
total = catTrain.shape[0]
print(count_target0/total, count_target1/total)
# View data in each column
'''
bin_0 ~ bin_4: binary data
nom_0: colors
nom_1: shapes
nom_2: animal types
nom_3: countries
nom_4: instruments
nom_5 ~ nom_9: strings
ord_0: 1, 2, 3
ord_1: Novice, Contributor, Master, Expert, Grandmaster(0-4)
ord_2: Freezing, Cold, Warm, Hot, Boiling Hot, Lava Hot(0-5)
ord_3: a-o (0-14)
ord_4: A-Z (0-25)
ord_5: double letters (e.g. "av", "PZ", "jS", "Ed")
day: 1-7
month: 1-12
'''
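# quick check (added, not in the original notebook): the cardinality of each
# column is what drives the different treatments chosen below
print(cat.nunique().sort_values(ascending=False).head(10))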
# change values according to data types
# for binary data (bin0 - bin4)
bin_col = [col for col in cat.columns if "bin" in col]
# one-hot encode the binary columns
bin_data = pd.DataFrame()
for i in bin_col:
temp = pd.get_dummies(cat[i], drop_first = True)
bin_data = pd.concat([bin_data, temp], axis = 1)
bin_data.columns = bin_col
bin_data
# modify values in ord_0 - ord_4
# treat alphabet letters as ordered (ordinal) data
# ord_0 does not need to be converted
# for ord_1: Novice, Contributor, Master, Expert, Grandmaster(0-4)
# order the five levels by the mean target within each ord_1 group
order1 = catTrain.groupby("ord_1").mean()["target"].sort_values()
order1[range(0, 5)] = range(0, 5)
ord1 = [order1[i] for i in cat["ord_1"]]
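# worked illustration of the ordering above (hypothetical numbers, not computed
# from the data): if mean(target) were Novice 0.17 < Contributor 0.21 <
# Master 0.25 < Expert 0.29 < Grandmaster 0.33, sorting by that mean relabels
# the levels 0, 1, 2, 3, 4 in the same order before mapping onto each row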
# for ord_2: Freezing, Cold, Warm, Hot, Boiling Hot, Lava Hot(0-5)
# according to mean of targets grouped by ord_1 to figure out degrees between the five levels
order2 = catTrain.groupby("ord_2").mean()["target"].sort_values()
order2[range(0, 6)] = range(0, 6)
ord2 = [order2[i] for i in cat["ord_2"]]
ord2
# for ord_3: map the letters to numbers
order3 = catTrain.groupby("ord_3").mean()["target"].sort_values()
order3[range(0, 15)] = range(0, 15)
ord3 = [order3[i] for i in cat["ord_3"]]
ord3
# for ord_4: map the letters to numbers
order4 = catTrain.groupby("ord_4").mean()["target"].sort_values()
order4[range(0, 26)] = range(0, 26)
ord4 = [order4[i] for i in cat["ord_4"]]
order4
# for ord_5
order5 = catTrain.groupby("ord_5").mean()["target"].sort_values()
ord5_deg = set(list(cat["ord_5"]))
order5[range(len(ord5_deg))] = range(len(ord5_deg))
ord5 = [order5[i] for i in cat["ord_5"]]
ord5
# combine ord_0 to ord_5
ord1, ord2, ord3, ord4, ord5 = pd.DataFrame(ord1), pd.DataFrame(ord2), pd.DataFrame(ord3), pd.DataFrame(ord4), pd.DataFrame(ord5)
ord_data = pd.concat([cat["ord_0"], ord1, ord2, ord3, ord4, ord5], axis = 1)
ord_data.columns = [col for col in cat.columns if "ord" in col]
ord_data
# periodic data: day and month
period_data = pd.concat([cat["day"], cat["month"]], axis = 1)
period_data
# keep the nominal columns nom_0 - nom_8 (nom_9 is excluded: too many distinct values)
nom_col = [col for col in cat.columns if "nom" in col and col != "nom_9"]
nom_data = cat[nom_col]
# combine all kinds of data
n_cat = pd.concat([bin_data, ord_data, period_data, nom_data], axis = 1)
n_cat.head(10)
del [bin_data, ord_data, period_data, nom_data]
# one-hot encoding, except for nom_9 (too many distinct categories)
n_cat = pd.get_dummies(n_cat, columns = nom_col, drop_first = True)
del cat
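# hedged sketch (added, not in the original notebook): the dropped high-cardinality
# nom_9 column could instead be frequency-encoded into a single numeric column.
# It would have to run before the `del cat` above, so it is left commented out:
# nom9_freq = cat["nom_9"].map(cat["nom_9"].value_counts(normalize=True))
# n_cat = pd.concat([n_cat, nom9_freq.rename("nom_9_freq")], axis=1)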
# resampling
from scipy.sparse import vstack, csr_matrix
from imblearn.over_sampling import RandomOverSampler
x_train = csr_matrix(n_cat[:300000])
y_train = catTrain["target"][:300000]
test = csr_matrix(n_cat[catTrain.shape[0]:])
'''ros = RandomOverSampler(random_state = 0)
x_res, y_res = ros.fit_resample(x_train, y_train)'''
# from sklearn.feature_selection import RFE # Recursive Feature Elimination
from sklearn.linear_model import LogisticRegression
logModel = LogisticRegression()
logModel.fit(x_train, y_train)
# predict test data
predictions = logModel.predict_proba(test)
predict_df = pd.DataFrame(predictions)
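# hedged sketch of writing a submission file; the 'id'/'target' column names are
# assumed from the competition format and are not part of the original notebook
submission = pd.DataFrame({"id": catTest["id"], "target": predict_df[1]})
submission.to_csv("submission.csv", index=False)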
import datetime
import pandas as pd
import numpy as np
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from analytics.events.utils.dataframe_builders import SupplementEventsDataframeBuilder, SleepActivityDataframeBuilder, \
ProductivityLogEventsDataframeBuilder
from betterself.utils.api_utils import get_api_value_formatted
from constants import VERY_PRODUCTIVE_TIME_LABEL
from betterself.utils.date_utils import get_current_date_years_ago
from events.models import SupplementLog, SleepLog, DailyProductivityLog
from supplements.models import Supplement
class SupplementAnalyticsMixin(object):
@classmethod
def _get_analytics_dataframe(cls, user, supplement_uuid):
supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=user)
supplement_series = cls._get_daily_supplement_events_series_last_year(user, supplement)
sleep_series = cls._get_sleep_series_last_year(user)
productivity_series = cls._get_productivity_series_last_year(user)
# if either sleep or productivity are empty, create an empty series that is timezone
# aware (hence, matching the supplement index)
if sleep_series.empty:
            sleep_series = pd.Series(index=supplement_series.index)
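        # presumably the original applies the same guard to productivity_series;
        # a hedged sketch of that branch (not shown in this excerpt) would be:
        # if productivity_series.empty:
        #     productivity_series = pd.Series(index=supplement_series.index)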
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
    # different types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, pd.Series(index=dr, data=expected))
# now use optional arguments
temp_model_params.update({'transmittance_absorptance': 0.8,
'array_height': 2,
'mount_standoff': 2.0})
expected = 60.477703576
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed,
effective_irradiance=1100.)
assert_allclose(out, expected)
def test_PVSystem_noct_celltemp_error():
poa_global, temp_air, wind_speed, module_efficiency = (1000., 25., 1., 0.2)
temp_model_params = {'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
with pytest.raises(KeyError):
system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_functions(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad_one = pd.Series(1000, index=times)
irrad_two = pd.Series(500, index=times)
temp_air = pd.Series(25, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system, (irrad_one, irrad_two), temp_air, wind_speed)
assert (temp_one != temp_two).all()
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_temp(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air_one = pd.Series(25, index=times)
temp_air_two = pd.Series(5, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_one, temp_air_two),
wind_speed
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_two, temp_air_one),
wind_speed
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_wind(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air = pd.Series(25, index=times)
wind_speed_one = pd.Series(1, index=times)
wind_speed_two = pd.Series(5, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_one, wind_speed_two)
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_two, wind_speed_one)
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1,), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1, 1, 1), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1,))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1, 1, 1))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_poa_length_mismatch(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, 1000, 25, 1)
def test_PVSystem_fuentes_celltemp(mocker):
noct_installed = 45
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
spy = mocker.spy(temperature, 'fuentes')
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
out = system.fuentes_celltemp(irrads, temps, winds)
assert_series_equal(spy.call_args[0][0], irrads)
assert_series_equal(spy.call_args[0][1], temps)
assert_series_equal(spy.call_args[0][2], winds)
assert spy.call_args[1]['noct_installed'] == noct_installed
assert_series_equal(out, pd.Series([52.85, 55.85, 55.85], index,
name='tmod'))
def test_PVSystem_fuentes_celltemp_override(mocker):
# test that the surface_tilt value in the cell temp calculation can be
# overridden but defaults to the surface_tilt attribute of the PVSystem
spy = mocker.spy(temperature, 'fuentes')
noct_installed = 45
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
# uses default value
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 20
# can be overridden
temp_model_params = {'noct_installed': noct_installed, 'surface_tilt': 30}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 30
def test_Array__infer_temperature_model_params():
array = pvsystem.Array(module_parameters={},
racking_model='open_rack',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'sapm']['open_rack_glass_polymer']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='freestanding',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['freestanding']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='insulated',
module_type=None)
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['insulated']
assert expected == array._infer_temperature_model_params()
def test_Array__infer_cell_type():
array = pvsystem.Array(module_parameters={})
assert array._infer_cell_type() is None
def test_calcparams_desoto(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
    effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import collections
import pytest
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from pandas.compat import StringIO, u
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(squeeze=True, index_col=0,
header=None, parse_dates=True)
params.update(**kwargs)
header = params.get("header")
out = pd.read_csv(path, **params)
if header is None:
out.name = out.index.name = None
return out
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean() as path:
self.ts.to_csv(path)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = self.read_csv(path)
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = self.read_csv(path)
assert_series_equal(self.ts, ts, check_names=False)
assert ts.name is None
assert ts.index.name is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
depr_ts = Series.from_csv(path)
assert_series_equal(depr_ts, ts)
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = dict(sep="\t")
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = dict(sep="\t")
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = "{}.csv".format(tm.rands(10))
msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
filename = e.value.filename
assert path == filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == np.object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests  # needed by catch_all() below, which mirrors the client in debug mode
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
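# Convert a flat list [key1, value1, key2, value2, ...] into a dictionary {key1: value1, ...}.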
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Send data to the client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
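# Build the feature matrix XData and the numeric class labels yData from the selected collection
# (and, when StanceTest is active, the corresponding held-out test split).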
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
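# Pre-compute the data space projections (MDS, t-SNE, UMAP) of XData and bundle them,
# together with the targets, for the frontend.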
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Send the pre-computed data space results to the frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
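# Return the positional indices of query_cols within df.columns, using argsort + searchsorted.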
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
def class_feature_importance(X, Y, feature_importances):
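# For each class, weight the per-feature means of the scaled data by the supplied feature importances.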
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(N), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# Calculate the performance and all other results for every model of each algorithm
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
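# 'loop' is the column position right after the eight grid-search metrics; the extra metrics
# computed below are inserted into 'metrics' starting at this offset.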
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
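# Note: the per-instance influence computation above is commented out, so impDataInst is not
# populated here; callPreResults() expects it to be available as a global elsewhere.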
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
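# Min-max normalize the collected log-loss values to [0, 1] across all models of this algorithm.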
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
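# Deduplicate a list while preserving order; NaN floats are dropped, other floats are kept as float.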
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
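# Rebuild the metrics table for the currently selected models. Each algorithm's results occupy
# a block of 9 consecutive positions in allParametersPerformancePerModel; the cross-validated
# metrics sit at position 6 of each block (hence indices 6, 15, 24, ...).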
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
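# Average the stored class probabilities (position 7 of each algorithm block) across the
# selected models, producing one averaged per-class probability vector per data instance.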
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
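# Drop the models listed by the client, recompute the averaged prediction space for the
# remaining models, and Procrustes-align it to the full prediction space so both layouts stay comparable.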
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
_, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
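# Collect the searched hyperparameters (position 1 of each algorithm block) for the selected models into one dataframe.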
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
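# Same as PreprocessingParam, but the per-algorithm parameter dataframes are returned separately instead of concatenated.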
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
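# Per-class classification_report metrics (position 2 of each algorithm block), filtered to the selected models.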
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
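# Per-feature accuracy results (position 3 of each algorithm block), filtered to the selected models.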
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_featAcc = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_featAcc
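# Permutation-importance results (position 4 of each algorithm block), filtered to the selected models.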
def preProcessPerm():
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_perm = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_perm
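# Univariate chi-squared feature scores; only the first block (position 5) is read because the
# scores depend on the data alone, not on any particular classifier.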
def preProcessFeatSc():
dicKNN = json.loads(allParametersPerformancePerModel[5])
dfKNN = pd.DataFrame.from_dict(dicKNN)
return dfKNN
# NOTE: candidate for removal.
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
for row in loopThroughMetrics.iterrows():
rowSum = 0
name, values = row
for loop, elements in enumerate(values):
rowSum = elements*factors[loop] + rowSum
if sum(factors) == 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
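# Index 19 is matthews_corrcoef (clamped to non-negative below) and index 21 is log_loss (inverted so that higher is better).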
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelColl):
if (index == 19):
metricsPerModelColl[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
def preProceModels():
models = KNNModels + SVCModels + GausNBModels + MLPModels + LRModels + LDAModels + QDAModels + RFModels + ExtraTModels + AdaBModels + GradBModels
return models
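# 2-D projections (MDS, t-SNE, UMAP) used by the scatterplot views.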
def FunMDS (data):
mds = MDS(n_components=2, random_state=RANDOM_SEED)
XTransformed = mds.fit_transform(data).T
XTransformed = XTransformed.tolist()
return XTransformed
def FunTsne (data):
tsne = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(data)
tsne.shape
return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
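# Build the initial model and prediction spaces (MDS/t-SNE/UMAP), compute the per-instance
# misclassification share, and trigger the first ensemble run.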
def InitializeEnsemble():
XModels = PreprocessingMetrics()
global ModelSpaceMDS
global ModelSpaceTSNE
global allParametersPerformancePerModel
global impDataInst
XModels = XModels.fillna(0)
ModelSpaceMDS = FunMDS(XModels)
ModelSpaceTSNE = FunTsne(XModels)
ModelSpaceTSNE = ModelSpaceTSNE.tolist()
ModelSpaceUMAP = FunUMAP(XModels)
PredictionProbSel = PreprocessingPred()
PredictionSpaceMDS = FunMDS(PredictionProbSel)
PredictionSpaceTSNE = FunTsne(PredictionProbSel)
PredictionSpaceTSNE = PredictionSpaceTSNE.tolist()
PredictionSpaceUMAP = FunUMAP(PredictionProbSel)
ModelsIDs = preProceModels()
impDataInst = processDataInstance(ModelsIDs,allParametersPerformancePerModel)
callPreResults()
key = 0
EnsembleModel(ModelsIDs, key)
ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP)
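# For every data instance, compute the share of selected models that predict it incorrectly
# (the stored predictions sit at position 8 of each algorithm block and are double-encoded as JSON).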
def processDataInstance(ModelsIDs, allParametersPerformancePerModel):
dicKNN = json.loads(allParametersPerformancePerModel[8])
dicKNN = json.loads(dicKNN)
dicSVC = json.loads(allParametersPerformancePerModel[17])
dicSVC = json.loads(dicSVC)
dicGausNB = json.loads(allParametersPerformancePerModel[26])
dicGausNB = json.loads(dicGausNB)
dicMLP = json.loads(allParametersPerformancePerModel[35])
dicMLP = json.loads(dicMLP)
dicLR = json.loads(allParametersPerformancePerModel[44])
dicLR = json.loads(dicLR)
dicLDA = json.loads(allParametersPerformancePerModel[53])
dicLDA = json.loads(dicLDA)
dicQDA = json.loads(allParametersPerformancePerModel[62])
dicQDA = json.loads(dicQDA)
dicRF = json.loads(allParametersPerformancePerModel[71])
dicRF = json.loads(dicRF)
dicExtraT = json.loads(allParametersPerformancePerModel[80])
dicExtraT = json.loads(dicExtraT)
dicAdaB = json.loads(allParametersPerformancePerModel[89])
dicAdaB = json.loads(dicAdaB)
dicGradB = json.loads(allParametersPerformancePerModel[98])
dicGradB = json.loads(dicGradB)
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_connect = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
global yData
global filterActionFinal
global dataSpacePointsIDs
lengthDF = len(df_connect.columns)
if (filterActionFinal == 'compose'):
getList = []
for index, row in df_connect.iterrows():
yDataSelected = []
for column in row[dataSpacePointsIDs]:
yDataSelected.append(column)
storeMode = mode(yDataSelected)
getList.append(storeMode)
df_connect[str(lengthDF)] = getList
countCorrect = []
length = len(df_connect.index)
for index, element in enumerate(yData):
countTemp = 0
dfPart = df_connect[[str(index)]]
for indexdf, row in dfPart.iterrows():
if (int(row.values[0]) == int(element)):
countTemp += 1
countCorrect.append(1 - (countTemp/length))
return countCorrect
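# Serialize every per-model artefact and 2-D projection into the ordered Results list consumed by the frontend (positions noted inline).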
def ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP):
global Results
global AllTargets
Results = []
parametersGen = PreprocessingParam()
PerClassMetrics = preProcessPerClassM()
FeatureAccuracy = preProcessFeatAcc()
perm_imp_eli5PDCon = preProcessPerm()
featureScoresCon = preProcessFeatSc()
metricsPerModel = preProcMetricsAllAndSel()
sumPerClassifier = preProcsumPerMetric(factors)
ModelsIDs = preProceModels()
parametersGenPD = parametersGen.to_json(orient='records')
PerClassMetrics = PerClassMetrics.to_json(orient='records')
FeatureAccuracy = FeatureAccuracy.to_json(orient='records')
perm_imp_eli5PDCon = perm_imp_eli5PDCon.to_json(orient='records')
featureScoresCon = featureScoresCon.to_json(orient='records')
XDataJSONEntireSet = XData.to_json(orient='records')
XDataJSON = XData.columns.tolist()
Results.append(json.dumps(sumPerClassifier)) # Position: 0
Results.append(json.dumps(ModelSpaceMDS)) # Position: 1
Results.append(json.dumps(parametersGenPD)) # Position: 2
Results.append(PerClassMetrics) # Position: 3
Results.append(json.dumps(target_names)) # Position: 4
Results.append(FeatureAccuracy) # Position: 5
Results.append(json.dumps(XDataJSON)) # Position: 6
Results.append(0) # Position: 7
Results.append(json.dumps(PredictionSpaceMDS)) # Position: 8
Results.append(json.dumps(metricsPerModel)) # Position: 9
Results.append(perm_imp_eli5PDCon) # Position: 10
Results.append(featureScoresCon) # Position: 11
Results.append(json.dumps(ModelSpaceTSNE)) # Position: 12
Results.append(json.dumps(ModelsIDs)) # Position: 13
Results.append(json.dumps(XDataJSONEntireSet)) # Position: 14
Results.append(json.dumps(yData)) # Position: 15
Results.append(json.dumps(AllTargets)) # Position: 16
Results.append(json.dumps(ModelSpaceUMAP)) # Position: 17
Results.append(json.dumps(PredictionSpaceTSNE)) # Position: 18
Results.append(json.dumps(PredictionSpaceUMAP)) # Position: 19
return Results
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/PlotClassifiers', methods=["GET", "POST"])
def SendToPlot():
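# Block until all raw data rows have been received before building the ensemble.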
while (len(DataResultsRaw) != DataRawLength):
pass
InitializeEnsemble()
response = {
'OverviewResults': Results
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRemoveFromStack', methods=["GET", "POST"])
def RetrieveSelClassifiersIDandRemoveFromStack():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
PredictionProbSelUpdate = PreprocessingPredUpdate(ClassifierIDsList)
global resultsUpdatePredictionSpace
resultsUpdatePredictionSpace = []
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[0])) # Position: 0
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[1]))
key = 3
EnsembleModel(ClassifierIDsList, key)
return 'Everything Okay'
# Sending the updated prediction space coordinates back to the frontend
@app.route('/data/UpdatePredictionsSpace', methods=["GET", "POST"])
def SendPredBacktobeUpdated():
response = {
'UpdatePredictions': resultsUpdatePredictionSpace
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoin', methods=["GET", "POST"])
def RetrieveSelClassifiersID():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
#ComputeMetricsForSel(ClassifierIDsList)
ClassifierIDCleaned = json.loads(ClassifierIDsList)
global keySpecInternal
keySpecInternal = 1
keySpecInternal = ClassifierIDCleaned['keyNow']
EnsembleModel(ClassifierIDsList, 1)
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoinLocally', methods=["GET", "POST"])
def RetrieveSelClassifiersIDLocally():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
ComputeMetricsForSel(ClassifierIDsList)
return 'Everything Okay'
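# Recompute the factor-weighted metrics for the subset of models selected by the user.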
def ComputeMetricsForSel(Models):
Models = json.loads(Models)
MetricsAlltoSel = PreprocessingMetrics()
listofModels = []
for loop in Models['ClassifiersList']:
listofModels.append(loop)
MetricsAlltoSel = MetricsAlltoSel.loc[listofModels,:]
global metricsPerModelCollSel
global factors
metricsPerModelCollSel = []
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['matthews_corrcoef'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_roc_auc_ovo_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['log_loss'])
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelCollSel):
if (index == 19):
metricsPerModelCollSel[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelCollSel[index] = (1 - metric)*factors[index] * 100
else:
metricsPerModelCollSel[index] = metric*factors[index] * 100
metricsPerModelCollSel[index] = metricsPerModelCollSel[index].to_json()
return 'okay'
# function to get unique values
def unique(list1):
# initialize an empty list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
# Sending the selected models' weighted metrics to be visualized as a bar chart
@app.route('/data/BarChartSelectedModels', methods=["GET", "POST"])
def SendToUpdateBarChart():
response = {
'SelectedMetricsForModels': metricsPerModelCollSel
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestDataPoint', methods=["GET", "POST"])
def RetrieveSelDataPoints():
DataPointsSel = request.get_data().decode('utf8').replace("'", '"')
DataPointsSelClear = json.loads(DataPointsSel)
listofDataPoints = []
for loop in DataPointsSelClear['DataPointsSel']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofDataPoints.append(temp[0])
global algorithmsList
global resultsMetrics
resultsMetrics = []
df_concatMetrics = []
metricsSelList = []
paramsListSepPD = []
paramsListSepPD = PreprocessingParamSep()
paramsListSeptoDicKNN = paramsListSepPD[0].to_dict(orient='list')
paramsListSeptoDicSVC = paramsListSepPD[1].to_dict(orient='list')
paramsListSeptoDicGausNB = paramsListSepPD[2].to_dict(orient='list')
paramsListSeptoDicMLP = paramsListSepPD[3].to_dict(orient='list')
paramsListSeptoDicLR = paramsListSepPD[4].to_dict(orient='list')
paramsListSeptoDicLDA = paramsListSepPD[5].to_dict(orient='list')
paramsListSeptoDicQDA = paramsListSepPD[6].to_dict(orient='list')
paramsListSeptoDicRF = paramsListSepPD[7].to_dict(orient='list')
paramsListSeptoDicExtraT = paramsListSepPD[8].to_dict(orient='list')
paramsListSeptoDicAdaB = paramsListSepPD[9].to_dict(orient='list')
paramsListSeptoDicGradB = paramsListSepPD[10].to_dict(orient='list')
RetrieveParamsCleared = {}
RetrieveParamsClearedListKNN = []
for key, value in paramsListSeptoDicKNN.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListKNN.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListSVC = []
for key, value in paramsListSeptoDicSVC.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListSVC.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGausNB = []
for key, value in paramsListSeptoDicGausNB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGausNB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListMLP = []
for key, value in paramsListSeptoDicMLP.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListMLP.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLR = []
for key, value in paramsListSeptoDicLR.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLR.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLDA = []
for key, value in paramsListSeptoDicLDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListQDA = []
for key, value in paramsListSeptoDicQDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListQDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListRF = []
for key, value in paramsListSeptoDicRF.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListRF.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListExtraT = []
for key, value in paramsListSeptoDicExtraT.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListExtraT.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListAdaB = []
for key, value in paramsListSeptoDicAdaB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListAdaB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGradB = []
for key, value in paramsListSeptoDicGradB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGradB.append(RetrieveParamsCleared)
if (len(paramsListSeptoDicKNN['n_neighbors']) == 0):
RetrieveParamsClearedListKNN = []
if (len(paramsListSeptoDicSVC['C']) == 0):
RetrieveParamsClearedListSVC = []
if (len(paramsListSeptoDicGausNB['var_smoothing']) == 0):
RetrieveParamsClearedListGausNB = []
if (len(paramsListSeptoDicMLP['alpha']) == 0):
RetrieveParamsClearedListMLP = []
if (len(paramsListSeptoDicLR['C']) == 0):
RetrieveParamsClearedListLR = []
if (len(paramsListSeptoDicLDA['shrinkage']) == 0):
RetrieveParamsClearedListLDA = []
if (len(paramsListSeptoDicQDA['reg_param']) == 0):
RetrieveParamsClearedListQDA = []
if (len(paramsListSeptoDicRF['n_estimators']) == 0):
RetrieveParamsClearedListRF = []
if (len(paramsListSeptoDicExtraT['n_estimators']) == 0):
RetrieveParamsClearedListExtraT = []
if (len(paramsListSeptoDicAdaB['n_estimators']) == 0):
RetrieveParamsClearedListAdaB = []
if (len(paramsListSeptoDicGradB['n_estimators']) == 0):
RetrieveParamsClearedListGradB = []
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = RetrieveParamsClearedListKNN
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = RetrieveParamsClearedListSVC
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = RetrieveParamsClearedListGausNB
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListMLP
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListLR
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = RetrieveParamsClearedListLDA
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = RetrieveParamsClearedListQDA
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListRF
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListExtraT
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListAdaB
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListGradB
AlgorithmsIDsEnd = GradBModelsCount
metricsSelList = GridSearchSel(clf, params, factors, AlgorithmsIDsEnd, listofDataPoints, crossValidation)
if (all(len(metricsSelList[i]) != 0 for i in range(11))):
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[0], paramsListSepPD[0]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfKNNCleared = dfKNN
else:
dfKNNCleared = dfKNN.drop(dfKNN.index[set_diff_df])
dicSVC = json.loads(metricsSelList[1])
dfSVC = pd.DataFrame.from_dict(dicSVC)
parametersSelDataPD = parametersSelData[1].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[1], paramsListSepPD[1]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfSVCCleared = dfSVC
else:
dfSVCCleared = dfSVC.drop(dfSVC.index[set_diff_df])
dicGausNB = json.loads(metricsSelList[2])
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
parametersSelDataPD = parametersSelData[2].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[2], paramsListSepPD[2]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfGausNBCleared = dfGausNB
else:
dfGausNBCleared = dfGausNB.drop(dfGausNB.index[set_diff_df])
dicMLP = json.loads(metricsSelList[3])
dfMLP = pd.DataFrame.from_dict(dicMLP)
parametersSelDataPD = parametersSelData[3].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[3], paramsListSepPD[3]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfMLPCleared = dfMLP
else:
dfMLPCleared = dfMLP.drop(dfMLP.index[set_diff_df])
dicLR = json.loads(metricsSelList[4])
dfLR = pd.DataFrame.from_dict(dicLR)
parametersSelDataPD = parametersSelData[4].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[4], paramsListSepPD[4]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLRCleared = dfLR
else:
dfLRCleared = dfLR.drop(dfLR.index[set_diff_df])
dicLDA = json.loads(metricsSelList[5])
dfLDA = pd.DataFrame.from_dict(dicLDA)
parametersSelDataPD = parametersSelData[5].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[5], paramsListSepPD[5]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLDACleared = dfLDA
else:
dfLDACleared = dfLDA.drop(dfLDA.index[set_diff_df])
dicQDA = json.loads(metricsSelList[6])
dfQDA = pd.DataFrame.from_dict(dicQDA)
parametersSelDataPD = parametersSelData[6].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[6], paramsListSepPD[6]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfQDACleared = dfQDA
else:
dfQDACleared = dfQDA.drop(dfQDA.index[set_diff_df])
dicRF = json.loads(metricsSelList[7])
dfRF = pd.DataFrame.from_dict(dicRF)
parametersSelDataPD = parametersSelData[7].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[7], paramsListSepPD[7]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfRFCleared = dfRF
else:
dfRFCleared = dfRF.drop(dfRF.index[set_diff_df])
dicExtraT = json.loads(metricsSelList[8])
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
parametersSelDataPD = parametersSelData[8].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[8], paramsListSepPD[8]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfExtraTCleared = dfExtraT
else:
dfExtraTCleared = dfExtraT.drop(dfExtraT.index[set_diff_df])
dicAdaB = json.loads(metricsSelList[9])
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
parametersSelDataPD = parametersSelData[9].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[9], paramsListSepPD[9]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfAdaBCleared = dfAdaB
else:
dfAdaBCleared = dfAdaB.drop(dfAdaB.index[set_diff_df])
dicGradB = json.loads(metricsSelList[10])
dfGradB = pd.DataFrame.from_dict(dicGradB)
parametersSelDataPD = parametersSelData[10].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[10], paramsListSepPD[10]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfGradBCleared = dfGradB
else:
dfGradBCleared = dfGradB.drop(dfGradB.index[set_diff_df])
df_concatMetrics = pd.concat([dfKNNCleared, dfSVCCleared, dfGausNBCleared, dfMLPCleared, dfLRCleared, dfLDACleared, dfQDACleared, dfRFCleared, dfExtraTCleared, dfAdaBCleared, dfGradBCleared])
else:
dfSVCCleared = pd.DataFrame()
dfKNNCleared = pd.DataFrame()
dfGausNBCleared = pd.DataFrame()
dfMLPCleared = pd.DataFrame()
dfLRCleared = pd.DataFrame()
dfLDACleared = pd.DataFrame()
dfQDACleared = pd.DataFrame()
dfRFCleared = pd.DataFrame()
dfExtraTCleared = pd.DataFrame()
dfAdaBCleared = pd.DataFrame()
dfGradBCleared = pd.DataFrame()
if (len(metricsSelList[0]) != 0):
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[0], paramsListSepPD[0]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfKNNCleared = dfKNN
else:
dfKNNCleared = dfKNN.drop(dfKNN.index[set_diff_df])
if (len(metricsSelList[1]) != 0):
dicSVC = json.loads(metricsSelList[1])
dfSVC = pd.DataFrame.from_dict(dicSVC)
parametersSelDataPD = parametersSelData[1].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[1], paramsListSepPD[1]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfSVCCleared = dfSVC
else:
dfSVCCleared = dfSVC.drop(dfSVC.index[set_diff_df])
if (len(metricsSelList[2]) != 0):
dicGausNB = json.loads(metricsSelList[2])
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
parametersSelDataPD = parametersSelData[2].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[2], paramsListSepPD[2]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfGausNBCleared = dfGausNB
else:
dfGausNBCleared = dfGausNB.drop(dfGausNB.index[set_diff_df])
if (len(metricsSelList[3]) != 0):
dicMLP = json.loads(metricsSelList[3])
dfMLP = pd.DataFrame.from_dict(dicMLP)
parametersSelDataPD = parametersSelData[3].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[3], paramsListSepPD[3]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfMLPCleared = dfMLP
else:
dfMLPCleared = dfMLP.drop(dfMLP.index[set_diff_df])
if (len(metricsSelList[4]) != 0):
dicLR = json.loads(metricsSelList[4])
dfLR = pd.DataFrame.from_dict(dicLR)
parametersSelDataPD = parametersSelData[4].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[4], paramsListSepPD[4]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLRCleared = dfLR
else:
dfLRCleared = dfLR.drop(dfLR.index[set_diff_df])
if (len(metricsSelList[5]) != 0):
dicLDA = json.loads(metricsSelList[5])
dfLDA = pd.DataFrame.from_dict(dicLDA)
parametersSelDataPD = parametersSelData[5].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[5], paramsListSepPD[5]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfLDACleared = dfLDA
else:
dfLDACleared = dfLDA.drop(dfLDA.index[set_diff_df])
if (len(metricsSelList[6]) != 0):
dicQDA = json.loads(metricsSelList[6])
dfQDA = pd.DataFrame.from_dict(dicQDA)
parametersSelDataPD = parametersSelData[6].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[6], paramsListSepPD[6]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfQDACleared = dfQDA
else:
dfQDACleared = dfQDA.drop(dfQDA.index[set_diff_df])
if (len(metricsSelList[7]) != 0):
dicRF = json.loads(metricsSelList[7])
dfRF = pd.DataFrame.from_dict(dicRF)
parametersSelDataPD = parametersSelData[7].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[7], paramsListSepPD[7]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfRFCleared = dfRF
else:
dfRFCleared = dfRF.drop(dfRF.index[set_diff_df])
if (len(metricsSelList[8]) != 0):
dicExtraT = json.loads(metricsSelList[8])
dfExtraT = | pd.DataFrame.from_dict(dicExtraT) | pandas.DataFrame.from_dict |
from requests import get
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa import ar_model
from statsmodels.tsa.base.datetools import dates_from_str
from statsmodels.tsa.stattools import adfuller
import numpy as np
import pandas
import re
import datetime
import json
from pymongo import MongoClient
def get_gov_data(endpoint):
response = get(endpoint, timeout=10)
if response.status_code >= 400:
raise RuntimeError(f'Request failed: { response.text }')
return response.json()
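# Note: the endpoint is expected to return JSON shaped roughly like
# {"data": [{"date": "...", "newCases": ...}, ...]}, which sort_and_prep_data consumes.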
def sort_and_prep_data(raw_data):
glob_dates = []
glob_cases = []
for datapoint in raw_data['data']:
glob_dates.append(datapoint['date'])
glob_cases.append(datapoint['newCases'])
dataReady = | pandas.DataFrame(columns=['New Cases']) | pandas.DataFrame |
import logging
import os
from multiprocessing import Pool, Value
from Bio import AlignIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
import pandas as pd
from thexb.UTIL_checks import check_fasta
################################ Important Info ################################
"""
Input:
    - A single file or a directory containing multiple files.
    - Window size in which to calculate the p-distance.
    - Threshold of missing data above which a sample's window is dropped (default: 0.75).
File name format: ChromosomeName.fasta
Input directory structure:
    WholeGenomeInSingleDirectory/
        chr1.fasta
        chr2.fasta
        chr3.fasta
        ...
        ...
Info:
    A single-file input returns a single output file, while a multi-file input returns
    output for each file as well as a cumulative file to load into p-Distance Tracer.
    You do not need to provide a .fai file; pyfaidx will create one if it cannot be found.
Functionality:
    - Calculate p-distance in windows.
    - Return nan for any window where a sample has more than the provided threshold of missing data (i.e., >0.75).
"""
############################### Set up logger #################################
logger = logging.getLogger(__name__)
def set_logger_level(WORKING_DIR, LOG_LEVEL):
# Remove existing log file if present
if os.path.exists(WORKING_DIR / 'logs/pdistance_calculator.log'):
os.remove(WORKING_DIR / 'logs/pdistance_calculator.log')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(WORKING_DIR / 'logs/pdistance_calculator.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(LOG_LEVEL)
return logger
############################## Helper Functions ###############################
# Make window generator
def window_generator(start, stop, WINDOW_SIZE_INT):
return (start + WINDOW_SIZE_INT), (stop + WINDOW_SIZE_INT)
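# Note: window_generator simply shifts the previous (start, stop) pair right by
# WINDOW_SIZE_INT; with the initial values (-WINDOW_SIZE_INT, 0) used in process_file,
# the first window produced is (0, WINDOW_SIZE_INT).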
def process_file(f, WINDOW_SIZE_INT, MISSING_CHAR, PDIST_THRESHOLD, PW_REF):
"""
    Load a fasta file, calculate windowed p-distances for it, and return the resulting dataframe.
"""
# Make pandas df to save results
pdist_df = pd.DataFrame()
# Load each chromosome file
alignment = AlignIO.read(f.as_posix(), 'fasta')
logger.info(f"{f.name} alignment loaded, starting p-distance calculation")
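    # DistanceCalculator('identity') scores pairwise distance as the fraction of
    # differing sites, i.e. an uncorrected p-distance.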
calculator = DistanceCalculator('identity')
samples = [r.id for r in alignment]
    # Ensure the provided reference sample name is found in the file
try:
assert PW_REF in samples
except AssertionError:
raise AssertionError("Reference sample provided is not in fasta file")
if pdist_df.empty:
pdist_df = pd.DataFrame(columns=['Chromosome', 'Window', "Sample", "Value"])
start = -WINDOW_SIZE_INT
stop = 0
while True:
win_start, win_stop = window_generator(start, stop, WINDOW_SIZE_INT)
window_aln = alignment[:, win_start:win_stop]
if window_aln.get_alignment_length() == 0:
break
# Check missing data
drop_samples = []
for i in window_aln:
sample_name = i.name
sample_seq = i.seq
missing_freq = sample_seq.count(MISSING_CHAR)/len(sample_seq)
            if missing_freq > PDIST_THRESHOLD:
                drop_samples.append(sample_name)
dist_matrix = calculator.get_distance(window_aln)
window_contents = {
"Chromosome": [f.stem]*len(samples),
"Window": [win_stop]*len(samples),
"Sample":samples,
"Value":dist_matrix[PW_REF],
}
window_df = | pd.DataFrame(window_contents) | pandas.DataFrame |
import ssl
from sys import exit
from os.path import isfile
from datetime import datetime
import OpenSSL
import click
from socket import setdefaulttimeout
import pandas as pd
from ndg.httpsclient.subj_alt_name import SubjectAltName
from pyasn1.codec.der import decoder
from gsan.clean_df import concat_dfs
from gsan.clean_df import filter_domain
from gsan.clean_df import reindex_df
from gsan.clean_df import strip_chars
from gsan.crtsh import get_crtsh
from gsan.output import dump_filename
from gsan.version import about_message
from gsan.extract_host_port import parse_host_port
@click.group()
@click.version_option(version="4.0.0", message=about_message)
def cli():
"""Get subdomain names from SSL Certificates."""
pass
@cli.command()
@click.argument("domains", nargs=-1)
@click.option("-m", "--match-domain", is_flag=True, help="Match domain name only")
@click.option("-o", "--output", help="Output to path/filename")
@click.option("-t", "--timeout", default=30, type=int, help="Set timeout for CRT.SH")
def crtsh(domains, match_domain, output, timeout):
"""Get domains from crt.sh"""
subdomains_data = []
for domain in domains:
click.secho(f"[+] Getting subdomains for {domain}", bold=True)
subdomain_df = get_crtsh(domain, timeout)
if match_domain:
subdomain_df = filter_domain(subdomain_df, domain)
subdomain_df = reindex_df(subdomain_df)
subdomains_data.append(subdomain_df)
merged_subdomains = concat_dfs(subdomains_data, domains)
click.secho("[+] Results:", bold=True)
print(merged_subdomains.to_string())
if output:
dump_filename(output, merged_subdomains)
@cli.command("scan")
@click.argument("hostnames", nargs=-1)
@click.option("-o", "--output", help="Output to path/filename")
@click.option("-m", "--match-domain", is_flag=True, help="Match domain name only")
@click.option("-c", "--crtsh", is_flag=True, help="Include results from CRT.SH")
@click.option("-t", "--timeout", default=3, help="Set timeout [default: 3]")
@click.option("-s", "--suppress", is_flag=True, help="Suppress output")
def scan_site(hostnames, match_domain, output, crtsh, timeout, suppress):
"""Scan domains from input or a text file, format is HOST[:PORT].
e.g: gsan scan domain1.com domain2.com:port
You can also pass a text file instead, just replace the first domain argument
for a file. eg: gsan scan filename.txt
If no ports are defined, then gsan assumes the port 443 is available."""
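    # Illustrative invocations (hostnames and filenames below are examples only):
    #   gsan scan example.com internal.example.com:8443
    #   gsan scan hosts.txt --output subdomains.csv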
subdomains_data = []
subjaltname = SubjectAltName()
if isfile(hostnames[0]):
with open(hostnames[0], "r") as host_file:
hostnames = [host.rstrip("\n") for host in host_file]
hostnames = [parse_host_port(host) for host in hostnames]
else:
hostnames = [parse_host_port(host) for host in hostnames]
bad_hosts = []
for hostname in hostnames:
click.secho(f"[+] Getting subdomains for {hostname[0]}", bold=True)
subdomains = []
port = hostname[1] if hostname[1] else 443
setdefaulttimeout(timeout)
try:
cert = ssl.get_server_certificate((hostname[0], port))
except Exception:
click.secho(f"[!] Unable to connect to host {hostname[0]}", bold=True, fg="red")
bad_hosts.append(hostname[0])
continue
# Thanks to Cato- for this piece of code:
# https://gist.github.com/cato-/6551668
        # iterate over all extensions from the certificate and collect SubjectAltName entries
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
for extension_id in range(0, x509.get_extension_count()):
ext = x509.get_extension(extension_id)
ext_name = ext.get_short_name().decode("utf-8")
if ext_name == "subjectAltName":
ext_data = ext.get_data()
decoded_dat = decoder.decode(ext_data, asn1Spec=subjaltname)
for name in decoded_dat:
if isinstance(name, SubjectAltName):
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
subdomains.append(str(component.getComponent()))
subdomain_df = | pd.Series(subdomains) | pandas.Series |
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sys
import pandas as pd
import requests
from datetime import datetime
def prod40(fte, prod):
df = pd.read_csv(fte, encoding='latin-1')
#drop Region = Nan: includes all invalid dates
df = df[df['Region_origen'].notna()]
df['Cod_region_origen'] = df['Cod_region_origen'].astype(int)
df['Cod_region_destino'] = df['Cod_region_destino'].astype(int)
#stardardize fechas
df['Inicio_semana'] = pd.to_datetime(df['Inicio_semana'], format='%d-%m-%Y')
df['Fin_semana'] = pd.to_datetime(df['Fin_semana'], format='%d-%m-%Y')
df['Inicio_semana'] = df['Inicio_semana'].astype(str)
df['Fin_semana'] = df['Fin_semana'].astype(str)
    # drop the Año and Mes columns
df.drop(columns=['Año', 'Mes'], inplace=True)
print(df.to_string())
df.to_csv(prod + 'TransporteAereo_std.csv', index=False)
def prod40_from_API(url, api_key, prod):
print('Generating prod40 from API')
response = requests.get(url + api_key)
my_list = response.json()['aéreo nacional - movimientos y pasajeros']
#print(my_list)
df = pd.DataFrame(my_list, dtype=str)
#print(list(df))
    # Compare 'mes' with the leading field of inicioSemana and finsemana:
    # if they match, that leading field is the month,
    # otherwise it is the day.
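    # Illustrative values: with mes='06', a field like '06-15-2020' is parsed month-first
    # (dayfirst=False) while '15-06-2020' is parsed day-first (the dates are hypothetical).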
for i in range(len(df)):
mes = df.loc[i, 'mes']
iniSemana = df.loc[i, 'inicioSemana']
finDe = df.loc[i, 'finsemana']
anio = df.loc[i,'anio']
print('mes: ' + mes)
print('iniSemana: ' + iniSemana[:2])
print('finDe: ' + finDe[:2])
if int(mes) == int(iniSemana[:2]):
            # print('month comes first in inicioSemana')
df.loc[i, 'inicioSemana'] = pd.to_datetime(df.loc[i, 'inicioSemana'], dayfirst=False)
else:
            # print('day comes first in inicioSemana')
df.loc[i, 'inicioSemana'] = pd.to_datetime(df.loc[i, 'inicioSemana'], dayfirst=True)
if int(mes) == int(finDe[:2]):
            # print('month comes first in finsemana')
df.loc[i, 'finsemana'] = pd.to_datetime(df.loc[i, 'finsemana'], dayfirst=False)
else:
            # print('day comes first in finsemana')
df.loc[i, 'finsemana'] = pd.to_datetime(df.loc[i, 'finsemana'], dayfirst=True)
df['inicioSemana'] = pd.to_datetime(df['inicioSemana'], dayfirst=True)
df['finsemana'] = pd.to_datetime(df['finsemana'], dayfirst=True)
# drop unused columns
df.drop(columns=['anio', 'mes'], inplace=True)
df_localidades = pd.read_csv('../input/JAC/JAC_localidades.csv')
    # add the region code and region name for the origin (origen)
df_aux = pd.merge(df, df_localidades, left_on='origen', right_on='Localidad')
df_aux.rename(columns={'semana': 'Semana',
'inicioSemana': 'Inicio_semana',
'finsemana': 'Fin_semana',
'origen': 'Origen',
'destino': 'Destino',
'operaciones': 'Operaciones',
'pasajeros': 'Pasajeros',
'Region': 'Region_origen',
'Cod_region': 'Cod_region_origen'}, inplace=True)
df_aux.drop(columns='Localidad', inplace=True)
    # add the region code and region name for the destination (destino)
df_aux = | pd.merge(df_aux, df_localidades, left_on='Destino', right_on='Localidad') | pandas.merge |
import pandas as pd
import numpy as np
import json
from tensorflow.keras import backend as K
from face_alignment_keras.models import FAN
from face_alignment_keras.image_generator import get_batch
from face_alignment_keras.image_generator import image_generator
import tensorflow as tf
class LossHistory(tf.keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
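# Typical usage (sketch): pass an instance through Keras' `callbacks` argument, e.g.
#   loss_history = LossHistory()
#   model.fit(x, y, callbacks=[loss_history])
# and inspect loss_history.losses afterwards; the fit call above is illustrative only.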
config_file = 'configs/FAN4-3D.json'
# read file
with open(config_file, 'r') as myfile:
data = myfile.read()
# parse file
config = json.loads(data)
batch_size = config['batch_size']
num_epoch = config['num_epoch']
custom_loss = True
# load data
df = | pd.read_csv(config['csv_file'], dtype=str) | pandas.read_csv |
# -*- coding: utf-8 -*-
import logging
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
LOGGER = logging.getLogger(__name__)
class OneHotLabelEncoder(object):
"""Combination of LabelEncoder + OneHotEncoder.
>>> df = pd.DataFrame([
... {'a': 'a', 'b': 1, 'c': 1},
... {'a': 'a', 'b': 2, 'c': 2},
... {'a': 'b', 'b': 2, 'c': 1},
... ])
>>> OneHotLabelEncoder().fit_transform(df.a)
a=a a=b
0 1 0
1 1 0
2 0 1
>>> OneHotLabelEncoder(max_labels=1).fit_transform(df.a)
a=a
0 1
1 1
2 0
>>> OneHotLabelEncoder(name='a_name').fit_transform(df.a)
a_name=a a_name=b
0 1 0
1 1 0
2 0 1
"""
def __init__(self, name=None, max_labels=None):
self.name = name
self.max_labels = max_labels
def fit(self, feature):
self.dummies = pd.Series(feature.value_counts().index).astype(str)
if self.max_labels:
self.dummies = self.dummies[:self.max_labels]
def transform(self, feature):
name = self.name or feature.name
dummies = pd.get_dummies(feature.astype(str))
dummies = dummies.reindex(columns=self.dummies, fill_value=0)
dummies.columns = ['{}={}'.format(name, c) for c in self.dummies]
return dummies
def fit_transform(self, feature):
self.fit(feature)
return self.transform(feature)
class FeatureExtractor(object):
"""Single FeatureExtractor applied to multiple features."""
def __init__(self, copy=True, features=None):
self.copy = copy
self.features = features or []
self._features = []
def detect_features(self, X):
features = []
for column in X.columns:
if not np.issubdtype(X[column].dtype, np.number):
features.append(column)
return features
def _fit(self, x):
pass
def fit(self, X, y=None):
if self.features == 'auto':
self._features = self.detect_features(X)
else:
self._features = self.features
for feature in self._features:
self._fit(X[feature])
def _transform(self, x):
pass
def transform(self, X):
if self.copy and self._features:
X = X.copy()
for feature in self._features:
LOGGER.debug("Extracting feature %s", feature)
x = X.pop(feature)
extracted = self._transform(x)
X = pd.concat([X, extracted], axis=1)
return X
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X)
class CategoricalEncoder(FeatureExtractor):
"""Use the OneHotLabelEncoder only on categorical features.
    NOTE: At the time of this release, sklearn.preprocessing.data.CategoricalEncoder
    had not been released yet, which is why we write our own version of it.
>>> df = pd.DataFrame([
... {'a': 'a', 'b': 1, 'c': 1},
... {'a': 'a', 'b': 2, 'c': 2},
... {'a': 'b', 'b': 2, 'c': 1},
... ])
    >>> ce = CategoricalEncoder(features=['a', 'c'])
    >>> ce.fit_transform(df)
b a=a a=b c=1 c=2
0 1 1 0 1 0
1 2 1 0 0 1
2 2 0 1 1 0
"""
def __init__(self, max_labels=None, **kwargs):
self.max_labels = max_labels
super(CategoricalEncoder, self).__init__(**kwargs)
def fit(self, X, y=None):
self.encoders = dict()
super(CategoricalEncoder, self).fit(X)
def _fit(self, x):
encoder = OneHotLabelEncoder(x.name, self.max_labels)
encoder.fit(x)
self.encoders[x.name] = encoder
def _transform(self, x):
encoder = self.encoders[x.name]
return encoder.transform(x)
class StringVectorizer(FeatureExtractor):
"""Use the sklearn CountVectorizer only on string features."""
def __init__(self, copy=True, features=None, **kwargs):
self.kwargs = kwargs
super(StringVectorizer, self).__init__(copy, features)
def fit(self, X, y=None):
self.vectorizers = dict()
super(StringVectorizer, self).fit(X)
def _fit(self, x):
vectorizer = CountVectorizer(**self.kwargs)
vectorizer.fit(x.fillna('').astype(str))
self.vectorizers[x.name] = vectorizer
def _transform(self, x):
vectorizer = self.vectorizers[x.name]
bow = vectorizer.transform(x.fillna('').astype(str))
bow_columns = ['{}_{}'.format(x.name, f) for f in vectorizer.get_feature_names()]
return pd.DataFrame(bow.toarray(), columns=bow_columns, index=x.index)
class DatetimeFeaturizer(FeatureExtractor):
"""Extract features from a datetime."""
def detect_features(self, X):
features = []
for column in X.columns:
if np.issubdtype(X[column].dtype, np.datetime64):
features.append(column)
return features
def _transform(self, x):
if not np.issubdtype(x.dtype, np.datetime64):
x = pd.to_datetime(x)
prefix = x.name + '_'
features = {
prefix + 'year': x.dt.year,
prefix + 'month': x.dt.month,
prefix + 'day': x.dt.day,
            prefix + 'weekday': x.dt.weekday,
prefix + 'hour': x.dt.hour,
}
return | pd.DataFrame(features) | pandas.DataFrame |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = | StataReader(path) | pandas.io.stata.StataReader |
import pandas as pd
import pytest
from maker import Board
from utils import match_count
nan = float("nan")
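# match_count(n) appears to return the number of first-round matches in a single-elimination
# bracket: n / 2 when n is a power of two, otherwise the count of players that must play an
# opening round to cut the field down to a power of two (e.g. 100 players -> 36 matches).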
@pytest.mark.parametrize(('player', 'match'), [
(128, 64),
(127, 63),
(100, 36),
(65, 1),
(64, 32),
(3, 1),
(2, 1),
])
def test_match_count(player, match):
assert match_count(player) == match
def pytest_funcarg__board(request):
return Board(match_count=2, keys=["club", "region"])
@pytest.mark.parametrize(("a", "b", "expected"), [
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "a", "region": "west"}),
False),
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "b", "region": nan}),
True),
(pd.Series({"club": "a", "region": nan}),
pd.Series({"club": "b", "region": "west"}),
True),
(pd.Series({"club": "a", "region": "west"}),
pd.Series({"club": "b", "region": "west"}),
False),
])
def test_valid_match(a, b, expected):
board = Board(match_count=2, keys=["club", "region"])
assert board._is_valid(a, b) is expected
@pytest.mark.parametrize(("data"), [
[ | pd.Series({"club": "a"}) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn import cross_validation
train = pd.read_csv('data/train.csv', parse_dates='datetime', index_col='datetime')
test = | pd.read_csv('data/test.csv', parse_dates='datetime', index_col='datetime') | pandas.read_csv |
from __future__ import print_function
# Class for downloading and working with hourly EPA AQS (Air Quality System) data
from builtins import zip
from builtins import range
from builtins import object
import os
from datetime import datetime
from zipfile import ZipFile
import pandas as pd
from numpy import array, arange
import inspect
import requests
class AQS(object):
def __init__(self):
# self.baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'
self.objtype = 'AQS'
self.daily = False
self.baseurl = 'https://aqsdr1.epa.gov/aqsweb/aqstmp/airdata/'
self.dates = [datetime.strptime('2014-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2014-06-06 13:00:00', '%Y-%m-%d %H:%M:%S')]
self.renamedhcols = ['datetime_local', 'datetime', 'State_Code', 'County_Code',
'Site_Num', 'Parameter_Code', 'POC', 'Latitude', 'Longitude',
'Datum', 'Parameter_Name', 'Obs', 'Units',
'MDL', 'Uncertainty', 'Qualifier', 'Method_type', 'Method_Code',
'Method_Name', 'State_Name', 'County_Name', 'Date_of_Last_Change']
self.renameddcols = ['datetime_local', 'State_Code', 'County_Code', 'Site_Num',
'Parameter_Code', 'POC', 'Latitude', 'Longitude', 'Datum',
'Parameter_Name', 'Sample_Duration', 'Pollutant_Standard',
'Units', 'Event_Type', 'Observation_Count',
'Observation_Percent', 'Obs', '1st_Max_Value',
'1st_Max Hour', 'AQI', 'Method_Code', 'Method_Name',
'Local_Site_Name', 'Address', 'State_Name', 'County_Name',
'City_Name', 'MSA_Name', 'Date_of_Last_Change']
self.savecols = ['datetime_local', 'datetime', 'SCS',
'Latitude', 'Longitude', 'Obs', 'Units', 'Species']
self.se_states = array(
['Alabama', 'Florida', 'Georgia', 'Mississippi', 'North Carolina', 'South Carolina', 'Tennessee',
'Virginia', 'West Virginia'], dtype='|S14')
self.se_states_abv = array(
['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN',
'VA', 'WV'], dtype='|S14')
self.ne_states = array(['Connecticut', 'Delaware', 'District Of Columbia', 'Maine', 'Maryland', 'Massachusetts',
'New Hampshire', 'New Jersey', 'New York', 'Pennsylvania', 'Rhode Island', 'Vermont'],
dtype='|S20')
self.ne_states_abv = array(['CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'],
dtype='|S20')
self.nc_states = array(
['Illinois', 'Indiana', 'Iowa', 'Kentucky', 'Michigan',
'Minnesota', 'Missouri', 'Ohio', 'Wisconsin'],
dtype='|S9')
self.nc_states_abv = array(['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 'WI'],
dtype='|S9')
self.sc_states = array(
['Arkansas', 'Louisiana', 'Oklahoma', 'Texas'], dtype='|S9')
self.sc_states_abv = array(['AR', 'LA', 'OK', 'TX'], dtype='|S9')
self.r_states = array(['Arizona', 'Colorado', 'Idaho', 'Kansas', 'Montana', 'Nebraska', 'Nevada', 'New Mexico',
'North Dakota', 'South Dakota', 'Utah', 'Wyoming'], dtype='|S12')
self.r_states_abv = array(['AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'],
dtype='|S12')
self.p_states = array(
['California', 'Oregon', 'Washington'], dtype='|S10')
self.p_states_abv = array(['CA', 'OR', 'WA'], dtype='|S10')
self.datadir = '.'
self.cwd = os.getcwd()
self.df = None # hourly dataframe
self.monitor_file = inspect.getfile(
self.__class__)[:-13] + '/data/monitoring_site_locations.dat'
self.monitor_df = None
self.d_df = None # daily dataframe
def check_file_size(self, url):
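        # Heuristic: a HEAD request reporting Content-Length above ~1 KB is treated as an
        # existing annual data file; tiny responses are assumed to mean the file is missing.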
test = requests.head(url).headers
if int(test['Content-Length']) > 1000:
return True
else:
return False
def retrieve_aqs_hourly_pm25_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url1 = self.baseurl + 'hourly_88101_' + year + '.zip'
if self.check_file_size(url1):
print('Downloading Hourly PM25 FRM: ' + url1)
filename = wget.download(url1)
print('')
print('Unpacking: ' + url1)
dffrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dffrm.columns = self.renamedhcols
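            # SCS packs the monitor identifier into a single integer:
            # state code * 1e7 + county code * 1e4 + site number (roughly SSCCCNNNN).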
dffrm['SCS'] = array(
dffrm['State_Code'].values * 1.E7 +
dffrm['County_Code'].values * 1.E4 + dffrm['Site_Num'].values,
dtype='int32')
else:
dffrm = pd.DataFrame(columns=self.renamedhcols)
url2 = self.baseurl + 'hourly_88502_' + year + '.zip'
if self.check_file_size(url2):
print('Downloading Hourly PM25 NON-FRM: ' + url2)
filename = wget.download(url2)
print('')
print('Unpacking: ' + url2)
dfnfrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dfnfrm.columns = self.renamedhcols
dfnfrm['SCS'] = array(
dfnfrm['State_Code'].values * 1.E7 +
dfnfrm['County_Code'].values *
1.E4 + dfnfrm['Site_Num'].values,
dtype='int32')
else:
dfnfrm = pd.DataFrame(columns=self.renamedhcols)
if self.check_file_size(url1) | self.check_file_size(url2):
df = pd.concat([dfnfrm, dffrm], ignore_index=True)
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
# df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
df['Species'] = 'PM2.5'
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_25_88101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_25_88101_' +
year + '.hdf', 'df', format='table')
else:
df = pd.DataFrame(columns=self.renamedhcols)
return df
def retrieve_aqs_hourly_ozone_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_44201_' + year + '.zip'
print('Downloading Hourly Ozone: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_OZONE_44201_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_OZONE_44201_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_pm10_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_81102_' + year + '.zip'
print('Downloading Hourly PM10: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_10_81102_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_10_81102_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_so2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42401_' + year + '.zip'
print('Downloading Hourly SO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_SO2_42401_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SO2_42401_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_no2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42602_' + year + '.zip'
print('Downloading Hourly NO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_NO2_42602_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NO2_42602_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_co_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42101_' + year + '.zip'
print('Downloading Hourly CO: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_CO_42101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_CO_42101_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_nonoxnoy_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_NONOxNOy_' + year + '.zip'
print('Downloading Hourly NO NOx NOy: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_NONOXNOY_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NONOXNOY_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_voc_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_VOCS_' + year + '.zip'
print('Downloading Hourly VOCs: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df, voc=True)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_VOC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_VOC_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_spec_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_SPEC_' + year + '.zip'
if self.check_file_size(url):
print('Downloading PM Speciation: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_SPEC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SPEC_' + year + '.hdf', 'df', format='table')
return df
else:
return pd.DataFrame(columns=self.renamedhcols)
def retrieve_aqs_hourly_wind_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_WIND_' + year + '.zip'
print('Downloading AQS WIND: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_WIND_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_WIND_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_temp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_TEMP_' + year + '.zip'
print('Downloading AQS TEMP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_TEMP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_TEMP_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_rhdp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_RH_DP_' + year + '.zip'
print('Downloading AQS RH and DP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_RHDP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_RHDP_' + year + '.hdf', 'df', format='table')
return df
def load_aqs_pm25_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_PM_25_88101_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_pm25_data(dates)
if aqs.empty:
return aqs
else:
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_voc_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_VOC_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_voc_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_ozone_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_OZONE_44201_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_ozone_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.Units = 'ppb'
aqs.Obs = aqs.Obs.values * 1000.
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_pm10_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_PM_10_81102_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_pm10_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_so2_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_SO2_42401_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_so2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_no2_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_NO2_42602_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_no2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_co_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_CO_42101_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_co_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_spec_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_SPEC_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
print('Retrieving Data')
aqs = self.retrieve_aqs_hourly_spec_data(dates)
if aqs.empty:
return pd.DataFrame(columns=self.renamedhcols)
else:
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_wind_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_WIND_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_wind_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_temp_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_TEMP_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_temp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_rhdp_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_RHDP_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_rhdp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_nonoxnoy_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_NONOXNOY_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_nonoxnoy_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_data(self, param, dates):
if param == 'PM2.5':
df = self.load_aqs_pm25_data(dates)
elif param == 'PM10':
df = self.load_aqs_pm10_data(dates)
elif param == 'SPEC':
df = self.load_aqs_spec_data(dates)
elif param == 'CO':
df = self.load_aqs_co_data(dates)
elif param == 'OZONE':
df = self.load_aqs_ozone_data(dates)
elif param == 'SO2':
df = self.load_aqs_so2_data(dates)
elif param == 'VOC':
df = self.load_aqs_voc_data(dates)
elif param == 'NONOXNOY':
df = self.load_aqs_nonoxnoy_data(dates)
elif param == 'WIND':
df = self.load_aqs_wind_data(dates)
elif param == 'TEMP':
df = self.load_aqs_temp_data(dates)
elif param == 'RHDP':
df = self.load_aqs_rhdp_data(dates)
return df
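    # Hypothetical usage sketch (the instance name `aqs` and the date construction are
    # assumptions, not part of this module):
    #   dates = pd.date_range(start='2018-01-01', end='2018-01-07', freq='H')
    #   ozone = aqs.load_data('OZONE', dates)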
def load_daily_data(self, param, dates):
if param == 'PM2.5':
df = self.load_aqs_daily_pm25_data(dates)
elif param == 'PM10':
df = self.load_aqs_daily_pm10_data(dates)
elif param == 'SPEC':
df = self.load_aqs_daily_spec_data(dates)
elif param == 'CO':
df = self.load_aqs_daily_co_data(dates)
elif param == 'OZONE':
            df = self.load_aqs_daily_ozone_data(dates)
elif param == 'SO2':
df = self.load_aqs_daily_so2_data(dates)
elif param == 'VOC':
df = self.load_aqs_daily_voc_data(dates)
elif param == 'NONOXNOY':
df = self.load_aqs_daily_nonoxnoy_data(dates)
elif param == 'WIND':
df = self.load_aqs_daily_wind_data(dates)
elif param == 'TEMP':
df = self.load_aqs_daily_temp_data(dates)
elif param == 'RHDP':
df = self.load_aqs_daily_rhdp_data(dates)
return df
def load_all_hourly_data2(self, dates, datasets='all'):
import dask
import dask.dataframe as dd
os.chdir(self.datadir)
params = ['SPEC', 'PM10', 'PM2.5', 'CO', 'OZONE',
'SO2', 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP']
dfs = [dask.delayed(self.load_data)(i, dates) for i in params]
dff = dd.from_delayed(dfs)
# dff = dff.drop_duplicates()
self.df = dff.compute()
self.df = self.change_units(self.df)
# self.df = pd.concat(dfs, ignore_index=True)
# self.df = self.change_units(self.df).drop_duplicates(subset=['datetime','SCS','Species','Obs']).dropna(subset=['Obs'])
os.chdir(self.cwd)
def load_all_daily_data(self, dates, datasets='all'):
import dask
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
os.chdir(self.datadir)
pbar = ProgressBar()
pbar.register()
params = ['SPEC', 'PM10', 'PM2.5', 'CO', 'OZONE',
'SO2', 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP']
# dfs = [dask.delayed(self.load_daily_data)(i,dates) for i in params]
# print dfs
# dff = dd.from_delayed(dfs)
# self.d_df = dff.compute()
dfs = [self.load_daily_data(i, dates) for i in params]
        self.d_df = pd.concat(dfs, ignore_index=True)
"""PubMed Crawler of CSBC/PS-ON Publications.
author: nasim.sanati
author: milen.nikolov
author: verena.chung
"""
import os
import re
import argparse
import getpass
import ssl
from datetime import datetime
import requests
from Bio import Entrez
from bs4 import BeautifulSoup
import synapseclient
import pandas as pd
from alive_progress import alive_bar
def login():
"""Log into Synapse. If cached info not found, prompt user.
Returns:
syn: Synapse object
"""
try:
syn = synapseclient.login(silent=True)
except Exception:
print("Cached credentials not found; please provide",
"your Synapse username and password.")
username = input("Synapse username: ")
password = getpass.getpass("Synapse password: ").encode("utf-8")
syn = synapseclient.login(
username=username, password=password,
rememberMe=True, silent=True)
return syn
def get_args():
"""Set up command-line interface and get arguments."""
parser = argparse.ArgumentParser(
description="Scrap PubMed information from a list of grant numbers"
+ " and put the results into a CSV file. Table ID can be provided"
+ " if interested in only scrapping for new publications.")
# TODO: default to the grants table/view in the "CSBC PS-ON DB" project
parser.add_argument("-g", "--grantview_id",
type=str, default="syn21918972",
help="Synapse table/view ID containing grant numbers in"
+ " 'grantNumber' column. (Default: syn21918972)")
parser.add_argument("-t", "--table_id",
type=str,
help="Current Synapse table holding PubMed info.")
parser.add_argument("-f", "--table_file",
type=str,
help="Local file table holding PubMed info.")
parser.add_argument("-o", "--output",
type=str, default="publications_"
+ datetime.today().strftime('%m-%d-%Y'),
help="Filename for output CSV. (Default:"
+ " publications_<current-date>)")
return parser.parse_args()
def get_view(syn, table_id):
"""Get Synapse table/data view containing grant numbers.
Assumptions:
        Synapse table/view has a column called 'grantNumber'
Returns:
dataframe: consortiums and their project descriptions.
"""
results = syn.tableQuery(
f"select * from {table_id}").asDataFrame()
return results[~results['grantNumber'].isnull()]
def get_grants(df):
"""Get list of grant numbers from dataframe.
Assumptions:
Dataframe has column called 'grantNumber'
Returns:
set: valid grant numbers, e.g. non-empty strings
"""
print(f"Querying for grant numbers...", end="")
grants = set(df.grantNumber.dropna())
print(f"{len(grants)} found\n")
return list(sorted(grants))
def get_pmids(grants, year_start=2018, year_end=2021):
"""Get list of PubMed IDs using grant numbers as search param.
Returns:
set: PubMed IDs
"""
print("Getting PMIDs from NCBI...")
all_pmids = set()
    # Brian's request: add a check that pubs. are retrieved for each grant number
count = 1
for grant in grants:
print(f" {count:02d}. Grant number {grant}...", end="")
handle = Entrez.esearch(db="pubmed", term=grant,
datetype="pdat", mindate=year_start, maxdate=year_end,
retmax=1_000_000, retmode="xml", sort="relevance")
pmids = Entrez.read(handle).get('IdList')
handle.close()
all_pmids.update(pmids)
print(f"{len(pmids)} found")
count += 1
print(f"Total unique publications: {len(all_pmids)}\n")
return all_pmids
def parse_header(header):
"""Parse header div for pub. title, authors journal, year, and doi."""
# TITLE
title = header.find('h1').text.strip()
# JOURNAL
journal = header.find('button').text.strip()
# PUBLICATION YEAR
pub_date = header.find('span', attrs={'class': "cit"}).text
year = re.search(r"(\d{4}).*?[\.;]", pub_date).group(1)
# DOI
doi_cit = header.find(attrs={'class': "citation-doi"})
doi = doi_cit.text.strip().lstrip("doi: ").rstrip(".") if doi_cit else ""
# AUTHORS
authors = [parse_author(a) for a in header.find_all(
'span', attrs={'class': "authors-list-item"})]
authors = [a for a in authors if a]
return (title, journal, year, doi, authors)
def parse_author(item):
"""Parse author name from HTML 'author-list-item"""
try:
author = item.find('a', attrs={'class': "full-name"}).text
except AttributeError:
author = item.find('span', attrs={'class': "full-name"}).text
return author
def parse_grant(grant):
"""Parse for grant number from grant annotation."""
if len(grant):
grant = re.sub(r'RO', 'R0', grant)
grant_info = re.search(r"([A-Z][A-Z](\s|-)*\d{3,})[ /-]", grant, re.I)
if grant_info is not None:
grant_number = grant_info.group(1).upper()
return re.sub(r'(\s|-)', '', grant_number)
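# Illustrative example (hypothetical grant annotation):
#   parse_grant("U54 CA209997-03/PP4") -> "CA209997"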
def get_related_info(pmid):
"""Get related information associated with publication.
Entrez will be used for optimal retrieval (since NCBI will kick
    us out if we web-scrape too often).
Returns:
dict: XML results for GEO, SRA, and dbGaP
"""
handle = Entrez.elink(dbfrom="pubmed", db="gds,sra,gap", id=pmid,
remode="xml")
results = Entrez.read(handle)[0].get('LinkSetDb')
handle.close()
related_info = {}
for result in results:
db = re.search(r"pubmed_(.*)", result.get('LinkName')).group(1)
ids = [link.get('Id') for link in result.get('Link')]
handle = Entrez.esummary(db=db, id=",".join(ids))
soup = BeautifulSoup(handle, "lxml")
handle.close()
related_info[db] = soup
return related_info
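# Note: elink LinkName values have the form "pubmed_<db>" (e.g. "pubmed_gds"); the regex
# above keeps only the part after the underscore, which is later looked up as 'gds'/'sra'/'gap'.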
def parse_geo(info):
"""Parse and return GSE IDs."""
gse_ids = []
if info:
tags = info.find_all('item', attrs={'name': "GSE"})
gse_ids = ["GSE" + tag.text for tag in tags]
return gse_ids
def parse_sra(info):
"""Parse and return SRX/SRP IDs."""
srx_ids = srp_ids = []
if info:
tags = info.find_all('item', attrs={'name': "ExpXml"})
srx_ids = [re.search(r'Experiment acc="(.*?)"', tag.text).group(1)
for tag in tags]
srp_ids = {re.search(r'Study acc="(.*?)"', tag.text).group(1)
for tag in tags}
return srx_ids, srp_ids
def parse_dbgap(info):
"""Parse and return study IDs."""
gap_ids = []
if info:
tags = info.find_all('item', attrs={'name': "d_study_id"})
gap_ids = [tag.text for tag in tags]
return gap_ids
def make_urls(url, accessions):
"""Create NCBI link for each accession in the iterable.
Returns:
str: list of URLs
"""
url_list = [url + accession for accession in list(accessions)]
return ", ".join(url_list)
def scrape_info(pmids, curr_grants, grant_view):
"""Create dataframe of publications and their pulled data.
Returns:
df: publications data
"""
columns = ["doi", "journal", "pubMedId", "pubMedUrl",
"publicationTitle", "publicationYear", "keywords",
"authors", "grantNumber",
"gseAccns", "gseUrls", "srxAccns", "srxUrls",
"srpAccns", "srpUrls", "dbgapAccns", "dbgapUrls"]
if not os.environ.get('PYTHONHTTPSVERIFY', '') \
and getattr(ssl, '_create_unverified_context', None):
ssl._create_default_https_context = ssl._create_unverified_context
table = []
with alive_bar(len(pmids)) as progress:
for pmid in pmids:
session = requests.Session()
url = f"https://www.ncbi.nlm.nih.gov/pubmed/?term={pmid}"
soup = BeautifulSoup(session.get(url).content, "lxml")
# HEADER
# Contains: title, journal, pub. date, authors, pmid, doi
header = soup.find(attrs={'id': "full-view-heading"})
# PubMed utilizes JavaScript now, so content does not always
# fully load on the first try.
if not header:
soup = BeautifulSoup(session.get(url).content, "lxml")
header = soup.find(attrs={'id': "full-view-heading"})
title, journal, year, doi, authors = parse_header(header)
authors = ", ".join(authors)
# GRANTS
try:
grants = [g.text.strip() for g in soup.find(
'div', attrs={'id': "grants"}).find_all('a')]
# Filter out grant annotations not in consortia.
grants = {parse_grant(grant) for grant in grants}
# if re.search(r"CA\d", grant, re.I)}
grants = list(filter(lambda x: x in curr_grants, grants))
except AttributeError:
grants = []
# KEYWORDS
abstract = soup.find(attrs={"id": "abstract"})
try:
keywords = abstract.find(text=re.compile(
"Keywords")).find_parent("p").text.replace(
"Keywords:", "").strip()
except AttributeError:
keywords = ""
# RELATED INFORMATION
# Contains: GEO, SRA, dbGaP
related_info = get_related_info(pmid)
gse_ids = parse_geo(related_info.get('gds'))
gse_url = make_urls(
"https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=", gse_ids)
srx, srp = parse_sra(related_info.get('sra'))
srx_url = make_urls("https://www.ncbi.nlm.nih.gov/sra/", srx)
srp_url = make_urls(
"https://trace.ncbi.nlm.nih.gov/Traces/sra/?study=", srp)
dbgap = parse_dbgap(related_info.get('gap'))
dbgap_url = make_urls(
"https://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/study.cgi?study_id=",
dbgap
)
row = pd.DataFrame(
[[doi, journal, pmid, url, title, year, keywords, authors,
grants, gse_ids, gse_url,
              srx, srx_url, list(srp), srp_url, dbgap, dbgap_url]],
columns=columns)
table.append(row)
session.close()
# Save table
    tmp_tbl = pd.concat(table)
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:32:14 2021
@author: alber
"""
import pandas as pd
import numpy as np
def turn_rules_to_df(list_rules, list_cols):
"""
Function to transform the results from dt_rules() into a dataframe with the
max and min values for each feature.
    There are only two limits per feature (max and min). If there is not enough
    information in the rule to obtain both values (e.g. rule = "x > 10") then
    a default value is applied to the missing limit (-np.inf for min and np.inf
    for max; e.g. "x > 10" turns into "x_min = 10, x_max = np.inf").
    If there is duplicated information, the limits keep the strictest values
    (e.g. "x > 10 & x > 8 & x < 30" turns into "x_max = 30, x_min = 10").
Parameters
----------
list_rules : list
A list with the following structure (example):
['gdpuls > -83.5',
'gdpuls > -79.5',
'gdenergy > -76.0',
'gdenergy > -79.0 & gdpuls > -70.0'
]
list_cols : list
List of features considered. For instance, for the example above:
feature_cols = ['gdenergy', 'gdpuls']
Returns
-------
df_rules : dataframe
A dataframe with two columns per feature (max/min) and one row
per rule. For the example above:
gdenergy_max gdenergy_min gdpuls_max gdpuls_min
inf -inf inf -83.5
inf -inf inf -79.5
inf -76.0 inf -inf
inf -79.0 inf -70.0
inf -inf inf -78.5
"""
df_rules = pd.DataFrame()
if len(list_rules) == 0:
print("Warning: Rule list is empty: returning a DF without Rules")
return pd.DataFrame({col + "_max": [] for col in list_cols}).append(
            pd.DataFrame({col + "_min": [] for col in list_cols})
        )
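# Minimal usage sketch (hypothetical rules), assuming the remainder of turn_rules_to_df
# parses each rule into per-feature max/min bounds as described in the docstring:
#   rules = ['gdpuls > -83.5', 'gdenergy > -79.0 & gdpuls > -70.0']
#   df_rules = turn_rules_to_df(rules, ['gdenergy', 'gdpuls'])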
"""
Supplementary Fig. 2
"""
"""This script is used to benchmark GLIPH's performance across a variety of clustering thresholds by
varying the hamming distance parameter. This script required a local installation of GLIPH. The output
of this script is saved as GLIPH.csv and can be found in the github repository."""
import pandas as pd
import os
import glob
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
directory = '../../Data/Glanville/'
antigens = os.listdir(directory)
seq = []
label = []
for antigen in antigens:
df = pd.read_csv(os.path.join(directory,antigen,antigen+'.tsv'),sep='\t')
seq.extend(df['aminoAcid'].tolist())
label.extend([antigen]*len(df))
df_out = pd.DataFrame()
df_out['Sequences'] = seq
df_out.to_csv('cdr3.txt',index=False)
df_ref = pd.DataFrame()
df_ref['Beta_Sequences'] = seq
df_ref['Labels'] = label
df_ref_dict = df_ref.set_index('Beta_Sequences').T.to_dict('list')
total_seq = len(seq)
num_clusters = []
variance = []
x=[]
y=[]
r = np.asarray(range(5))
for t in r:
#Erase previous GLIPH outputs
files = glob.glob('cdr3*')
for file in files:
if file != 'cdr3.txt':
os.remove(file)
#Run GLIPH
os.system('gliph/bin/gliph-group-discovery.pl --tcr cdr3.txt --gccutoff='+str(t))
    df_in = pd.read_csv('cdr3-convergence-groups.txt', sep='\t', header=None)
# -*- coding: utf-8; py-indent-offset:4 -*-
import time
import datetime as dt
import pandas as pd
import akshare as ak
from ..utils import earn_to_annual, dict_from_df
_SINA_DOWNLOAD_DELAY = 1
def download_cn_stock_list(param= None, verbose= False):
df = ak.stock_info_a_code_name()
df.columns = ['symbol', 'name']
if verbose:
print(df)
return df
def download_cn_index_list(param, verbose = False):
df = ak.index_stock_info()
df.columns = ['symbol','name','publish_date']
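    # Index codes starting with '0' are prefixed as Shanghai ('sh'); all others as Shenzhen ('sz').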
df['symbol'] = df['symbol'].apply(lambda x: 'sh'+x if (x[0]=='0') else 'sz'+x)
df = df[['symbol', 'name']]
if verbose:
print(df)
return df
def download_cn_index_stocks_list(symbol, verbose = False):
if symbol.startswith('sh') or symbol.startswith('sz'):
symbol = symbol[2:]
df = ak.index_stock_cons(index= symbol)
df.columns = ['symbol','name','publish_date']
df = df[['symbol', 'name']]
if verbose:
print(df)
return df
else:
raise ValueError('unknown index: ' + symbol)
def download_hk_stock_list(param= None, verbose= False):
df = ak.stock_hk_spot()
# columns: symbol, name, engname, tradetype, lasttrade, prevclose, open, high, low, volume, amount, ticktime, buy, sell, pricechange, changepercent
df = df[['symbol', 'name']]
if verbose:
print(df)
return df
# TODO:
def download_hk_index_list(param, verbose = False):
df = pd.DataFrame([], columns=['symbol','name'])
# TODO:
return df
'''
def download_cn_index_daily( symbol ):
daily_df = ak.stock_zh_index_daily(symbol = symbol)
daily_df['date'] = pd.to_datetime(daily_df.index.date)
daily_df.set_index('date', inplace=True, drop=True)
return daily_df
'''
'''
def _download_cn_stock_daily( symbol, adjust = '' ):
start = '20000101'
end = dt.datetime.now().strftime('%Y%m%d')
if len(symbol) == 5:
df = ak.stock_hk_daily(symbol=symbol, adjust = adjust)
time.sleep( _SINA_DOWNLOAD_DELAY )
df.index = df.index.set_names('date')
elif len(symbol) == 6:
if symbol[0] == '6':
symbol = 'sh' + symbol
elif symbol[0] in ['0','3']:
symbol = 'sz' + symbol
df = ak.stock_zh_a_daily(symbol=symbol, start_date=start, end_date=end, adjust = adjust)
time.sleep( _SINA_DOWNLOAD_DELAY )
else:
raise ValueError('unknown symbol: ' + symbol)
if 'date' in df.columns:
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace= True, drop= True)
df = df.astype(float)
df.sort_index(ascending=True, inplace=True)
# drop invalid data
if 'close' in df.columns:
df = df[df['close'] > 0.01]
return df
def _download_stock_daily_adjust_factor( symbol, adjust ):
df = _download_cn_stock_daily( symbol, adjust )
if len(symbol) == 6:
df.columns = ['factor']
elif len(symbol) == 5:
df.columns = ['factor', 'cash']
else:
raise ValueError('unknown symbol: ' + symbol)
return df[['factor']]
#
# downloading cn stock daily data from yahoo is more efficient:
# (1) yahoo will not block the IP
# (2) yahoo provides adj_close along with close, which makes it easier to calc the adjust factor
#
def download_cn_stock_daily( symbol ):
print('\rfetching history data {} ...'.format(symbol))
daily_df = _download_cn_stock_daily( symbol )
factor_df = _download_stock_daily_adjust_factor( symbol, 'hfq-factor' )
df = pd.merge(
daily_df, factor_df, left_index=True, right_index=True, how='outer'
)
df = df.fillna(method='ffill').astype(float).dropna().drop_duplicates(subset=['open', 'high', 'low', 'close', 'volume'])
df['factor'] = df['factor'] / df['factor'].iloc[-1]
df['adj_close'] = df['close'] * df['factor']
return df
'''
def download_finance_report(symbol, report_type = 'balance'):
en_zh = {'balance':'资产负债表', 'income':'利润表', 'cashflow':'现金流量表'}
if report_type in en_zh:
pass
else:
raise ValueError('Unknown finance report type: ' + report_type)
print('fetching ' + report_type + ' report ...', symbol)
df = ak.stock_financial_report_sina(symbol, en_zh[ report_type ])
time.sleep(_SINA_DOWNLOAD_DELAY)
df.rename(columns={'报表日期':'date'}, inplace= True)
val = df.iloc[0]['date']
if type(val) == str:
if '-' in val:
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
else:
df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')
return df
def download_finance_balance_report(symbol):
return download_finance_report(symbol, 'balance')
def download_finance_income_report(symbol):
return download_finance_report(symbol, 'income')
def download_finance_cashflow_report(symbol):
return download_finance_report(symbol, 'cashflow')
def extract_data_from_report(report_df, data_mapping):
df = pd.DataFrame()
cols = report_df.columns
for key, fields in data_mapping.items():
found = False
for field in fields:
if field in cols:
df[key] = report_df[field]
found = True
if not found:
raise ValueError('Column not found in report: ' + key)
df = df.astype(float)
df.index = pd.to_datetime( report_df['date'] )
return df
def extract_abstract_from_report(symbol, balance_df, income_df, cashflow_df):
df1 = extract_data_from_report(balance_df, {
'assets': ['资产总计'],
'debt': ['负债合计'],
'equity': ['股东权益合计','所有者权益合计','所有者权益(或股东权益)合计'],
'shares': ['股本','实收资本(或股本)']
})
df1 = df1[~df1.index.duplicated(keep='first')]
df1 = df1[df1['shares'] > 0]
df2 = extract_data_from_report(income_df, {
# income sheet
'revenue': ['一、营业收入', '一、营业总收入'],
'cost': ['二、营业支出','二、营业总成本'],
'earning': ['五、净利润'],
})
df2 = df2[~df2.index.duplicated(keep='first')]
df3 = extract_data_from_report(cashflow_df, {
# cashflow sheet
'operate': ['经营活动产生的现金流量净额'],
'invest': ['投资活动产生的现金流量净额'],
'financing': ['筹资活动产生的现金流量净额'],
'cash': ['六、期末现金及现金等价物余额']
})
df3 = df3[~df3.index.duplicated(keep='first')]
df = pd.DataFrame()
x1 = set(df1.index.tolist())
x2 = set(df2.index.tolist())
x3 = set(df3.index.tolist())
if len(x1 ^ x2) > 0 or len(x1 ^ x3) > 0:
xu = x1.union(x2, x3)
#info = symbol + ': some quarter report missing\n'
diff = {
'balance': (xu-x1),
'income': (xu-x2),
'cashflow': (xu-x3),
}
for k, v in diff.items():
dates = []
for date in v:
dates.append(date.strftime('%Y-%m-%d'))
dates.sort()
#info = info + '\t' + k + ': ' + ', '.join(dates) + '\n'
#print(info)
df = pd.concat([df1, df2, df3], axis=1, join='inner')
df = df.astype({
'assets':'float',
'debt':'float',
'equity':'float',
'shares':'float',
'revenue':'float',
'cost':'float',
'earning':'float',
'operate':'float',
'invest':'float',
'financing':'float',
'cash':'float',
})
# sort the date from early to late
df = df.sort_index(ascending= True)
return df
def download_ipo(symbol):
print('fetching ipo info ...')
df = ak.stock_ipo_info(stock= symbol)
time.sleep(_SINA_DOWNLOAD_DELAY)
return df
def download_dividend_history(symbol):
print('fetching dividend info ...')
df = ak.stock_history_dividend_detail(indicator="分红", stock=symbol, date='')
time.sleep(_SINA_DOWNLOAD_DELAY)
df = df[df['进度'] == '实施']
df.index = pd.to_datetime(df['公告日期'])
df = df.astype({
'公告日期':'datetime64',
#'除权除息日':'datetime64',
#'股权登记日':'datetime64',
})
df.sort_index(ascending= True, inplace= True)
return df
def download_rightissue_history(symbol):
print('fetching rightissue info ...')
df = ak.stock_history_dividend_detail(indicator="配股", stock=symbol, date='')
time.sleep(_SINA_DOWNLOAD_DELAY)
df = df[df['查看详细'] == '查看']
    df.index = pd.to_datetime(df['公告日期'])
#!/usr/bin/env python
# coding: utf-8
# # IRM Analysis
# This notebook will compare the performance of IRM on an unseen platform's worth of gene expression to that of ERM. These results will be used for the preliminary data section for Aim 2 in my prelim proposal.
#
# For more information on what IRM and ERM are, read [Invariant Risk Minimization by Arjovsky et al.](https://arxiv.org/abs/1907.02893)
#
# The EDA code is [here](#EDA), or to skip to the analysis, go [here](#eval)
# <a id='eda'></a>
# ## Sepsis EDA
#
# To have a good measure of training performance, ideally we'll have one platform's data held out as a validation set. To see how possible that is, we'll do exploratory data analysis on the sepsis studies in the dataset.
# In[1]:
import itertools
import json
import os
import sys
from pathlib import Path
import pandas as pd
import sklearn.metrics as metrics
import sklearn.preprocessing as preprocessing
import torch
from plotnine import *
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from whistl import datasets
from whistl.datasets import CompendiumDataset
from whistl import models
from whistl import train
from whistl import utils
# In[2]:
import random
import numpy as np
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(42)
random.seed(42)
# In[3]:
curr_path = str(Path('.'))
map_file = str(Path('../../data/sample_classifications.pkl'))
sample_to_label = utils.parse_map_file(map_file)
sample_ids = sample_to_label.keys()
metadata_file = str(Path('../../data/all_metadata.json'))
metadata_json = json.load(open(metadata_file))
sample_metadata = metadata_json['samples']
sample_ids = utils.filter_invalid_samples(sample_metadata, sample_ids)
sample_to_platform = utils.map_sample_to_platform(metadata_json, sample_ids)
sample_to_study = utils.map_sample_to_study(metadata_json, sample_ids)
# In[4]:
compendium_path = str(Path('../../data/subset_compendium.tsv'))
compendium_df = datasets.load_compendium_file(compendium_path)
compendium_df.head()
# In[5]:
sepsis_samples = [sample for sample in sample_ids if sample_to_label[sample] == 'sepsis']
sepsis_platforms = [sample_to_platform[sample] for sample in sepsis_samples]
sepsis_studies = [sample_to_study[sample] for sample in sepsis_samples]
print(len(sepsis_samples))
print(len(sepsis_platforms))
print(len(sepsis_studies))
# In[6]:
sepsis_metadata_dict = {'sample': sepsis_samples, 'platform': sepsis_platforms, 'study': sepsis_studies}
sepsis_metadata_df = pd.DataFrame(sepsis_metadata_dict)
sepsis_metadata_df = sepsis_metadata_df.set_index('sample')
sepsis_metadata_df.head()
# In[7]:
sepsis_metadata_df['platform'].value_counts()
# In[8]:
sepsis_metadata_df[sepsis_metadata_df['platform'] == 'affymetrix human genome u133a array (hgu133a)']
# In[9]:
# Remove platform with only one sample to reduce downstream variance
sepsis_metadata_df = sepsis_metadata_df.drop(labels='GSM301847', axis=0)
print(len(sepsis_metadata_df.index))
# In[10]:
sepsis_metadata_df['study'].value_counts()
# <a id='eval'></a>
# ## IRM Evaluation
# ### Setup
# In[11]:
curr_path = os.path.dirname(os.path.abspath(os.path.abspath('')))
map_file = str(Path('../../data/sample_classifications.pkl'))
sample_to_label = utils.parse_map_file(map_file)
metadata_path = str(Path('../../data/all_metadata.json'))
compendium_path = str(Path('../../data/subset_compendium.tsv'))
# ### More setup
# Initialize the model and encoder for the training process
# In[12]:
classes = ['sepsis', 'healthy']
encoder = preprocessing.LabelEncoder()
encoder.fit(classes)
# ### Tune split
# We will get a rough estimate of performance with leave-one-out cross-validation. To know when to stop training, though, we will need a tuning dataset.
# In[13]:
tune_df = sepsis_metadata_df[sepsis_metadata_df['platform'] == 'affymetrix human genome u219 array (hgu219)']
train_df = sepsis_metadata_df[sepsis_metadata_df['platform'] != 'affymetrix human genome u219 array (hgu219)']
print(len(tune_df.index))
print(len(train_df.index))
tune_studies = tune_df['study'].unique()
tune_dataset = CompendiumDataset(tune_studies, classes,
sample_to_label, metadata_path,
compendium_path, encoder)
tune_loader = DataLoader(tune_dataset, batch_size=1)
# ### Filter Platforms
# Remove a platform that corresponds to a study present in the labeled data, but not the human compendium
# In[14]:
platforms = train_df['platform'].unique()
platforms = [p
for p in platforms
if p != 'affymetrix human human exon 1.0 st array (huex10st)'
]
num_seeds = 5
# ## Training
#
# The models are trained with two platforms held out.
# One platform (hgu219) is left out in all runs, and is used as a tuning set to determine which version of the model should be saved.
# The second platform (referred to going forward as the 'held-out platform') is held out during training, then the trained model's performance is evaluated by trying to predict whether each sample corresponds to sepsis or healthy expression.
# In[15]:
irm_result_list = []
erm_result_list = []
for hold_out_platform in platforms:
train_platforms = train_df[train_df['platform'] != hold_out_platform]['platform'].unique()
train_loaders = []
total_irm_samples = 0
for platform in train_platforms:
studies = train_df[train_df['platform'] == platform]['study']
train_dataset = CompendiumDataset([platform], classes, sample_to_label, metadata_path, compendium_path,
encoder, mode='platform')
total_irm_samples += len(train_dataset)
if len(train_dataset) > 0:
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
train_loaders.append(train_loader)
platform_file = hold_out_platform.split('(')[-1].strip(')')
full_train_studies = train_df[train_df['platform'] != hold_out_platform]['study'].unique()
full_train_dataset = CompendiumDataset(train_platforms, classes, sample_to_label, metadata_path,
compendium_path, encoder, mode='platform')
full_train_loader = DataLoader(full_train_dataset, batch_size=8, shuffle=True)
assert total_irm_samples == len(full_train_dataset)
for seed in range(num_seeds):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
net = models.ThreeLayerNet(len(compendium_df.index))
writer_path = Path('./logs/erm_analysis_{}_{}.tfrecord'.format(platform_file, seed))
writer = SummaryWriter(writer_path)
save_file = Path('./logs/erm_analysis_{}_{}.pkl'.format(platform_file, seed))
results = train.train_with_erm(net, full_train_loader,
tune_loader, num_epochs=400,
save_file=save_file, writer=writer)
erm_result_list.append(results)
net = models.ThreeLayerNet(len(compendium_df.index))
writer_path = Path('./logs/irm_analysis_{}_{}.tfrecord'.format(platform_file, seed))
writer = SummaryWriter(writer_path)
save_file = Path('./logs/irm_analysis_{}_{}.pkl'.format(platform_file, seed))
results = train.train_with_irm(net, train_loaders,
tune_loader, num_epochs=400,
loss_scaling_factor=1, save_file=save_file,
writer=writer, burn_in_epochs=0)
irm_result_list.append(results)
# In[16]:
def eval_model(net, loader):
all_labels = []
all_preds = []
for batch in loader:
expression, labels, ids = batch
expression = expression.float().to('cuda')
labels = labels.numpy()
all_labels.extend(labels)
output = net(expression)
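        # The single output is treated as a raw score/logit: thresholding at 0 yields class 1
        # ('sepsis' under the alphabetically ordered LabelEncoder) vs class 0 ('healthy').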
preds = [1 if p > 0 else 0 for p in output]
all_preds.extend(preds)
f1 = metrics.f1_score(all_labels, all_preds)
return f1
# In[17]:
irm_f1_scores = []
erm_f1_scores = []
for hold_out_platform in platforms:
for seed in range(num_seeds):
# Load data
try:
hold_out_studies = train_df[train_df['platform'] == hold_out_platform]['study']
hold_out_dataset = CompendiumDataset(hold_out_studies, classes, sample_to_label, metadata_path, compendium_path, encoder)
hold_out_loader = DataLoader(hold_out_dataset, batch_size=1, shuffle=False)
# Load IRM model
platform_file = hold_out_platform.split('(')[-1].strip(')')
save_file = Path('./logs/irm_analysis_{}_{}.pkl'.format(platform_file, seed))
net = torch.load(save_file, 'cuda')
#Evaluate ERM model
f1_score = eval_model(net, hold_out_loader)
irm_f1_scores.append(f1_score)
# Load ERM model
save_file = Path('./logs/erm_analysis_{}_{}.pkl'.format(platform_file, seed))
net = torch.load(save_file, 'cuda')
# Evaluate IRM model
f1_score = eval_model(net, hold_out_loader)
erm_f1_scores.append(f1_score)
except FileNotFoundError as e:
print(e)
# In[18]:
print(irm_f1_scores)
print(erm_f1_scores)
held_out_platform_list = []
for platform in platforms:
p = [platform] * 2 * num_seeds
held_out_platform_list.extend(p)
#print(held_out_platform_list)
score_list = list(itertools.chain(*zip(irm_f1_scores, erm_f1_scores)))
print(score_list)
label_list = (['irm'] + ['erm']) * (len(score_list) // 2)
print(label_list)
# In[29]:
held_out_platform_list = [plat.split('(')[-1].strip(')') for plat in held_out_platform_list]
result_dict = {'f1_score': score_list, 'irm/erm': label_list, 'held_out_platform': held_out_platform_list}
result_df = pd.DataFrame(result_dict)
result_df.head()
# ## Results
#
# The first set of figures measures the models' performance on the held-out platform, i.e. their ability to generalize.
#
# The second set of figures measures the models' performance on the tuning set to show training behavior (and, to a lesser extent, the models' ability to predict a held-out set).
# In[30]:
(ggplot(result_df, aes('irm/erm', 'f1_score', color='held_out_platform')) +
geom_jitter(size=3) +
ggtitle('F1 Score on held-out platform')
)
# In[21]:
(ggplot(result_df, aes('irm/erm', 'f1_score')) +
geom_violin() +
ggtitle('F1 Score on held-out platform')
)
# In[38]:
irm_accs = [result['tune_acc'] for result in irm_result_list]
irm_mean_accs = [sum(accs) / len(accs) for accs in irm_accs]
print(irm_mean_accs)
[acc.sort() for acc in irm_accs]
irm_median_accs = [acc[len(acc) //2] for acc in irm_accs]
print(irm_median_accs)
irm_max_accs = [max(accs) for accs in irm_accs]
# In[39]:
erm_accs = [result['tune_acc'] for result in erm_result_list]
erm_mean_accs = [sum(accs) / len(accs) for accs in erm_accs]
print(erm_mean_accs)
[acc.sort() for acc in erm_accs]
erm_median_accs = [acc[len(acc) //2] for acc in erm_accs]
print(erm_median_accs)
erm_max_accs = [max(accs) for accs in erm_accs]
# In[40]:
mean_list = list(itertools.chain(*zip(irm_mean_accs, erm_mean_accs)))
median_list = list(itertools.chain(*zip(irm_median_accs, erm_median_accs)))
max_list = list(itertools.chain(*zip(irm_max_accs, erm_max_accs)))
label_list = (['irm'] + ['erm']) * (len(mean_list) // 2)
held_out_platform_list = []
for platform in platforms:
plat = platform.split('(')[-1].strip(')')
p = [plat] * 2 * num_seeds
held_out_platform_list.extend(p)
held_out_platform_list = [plat.split('(')[-1].strip(')') for plat in held_out_platform_list]
result_dict = {'mean_acc': mean_list, 'median_acc': median_list, 'max_acc': max_list, 'irm/erm': label_list,
'held_out_platform': held_out_platform_list}
result_df = pd.DataFrame(result_dict)
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
if start_date != None and end_date != None:
start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
elif start_date != None and end_date == None:
start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
elif start_date == None and end_date != None:
start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
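# e.g. return_between_date_query_string('2020-01-01', '2020-02-01')
#   -> "AND [TimeStampLocal] >= '2020-01-01' AND [TimeStampLocal] < '2020-02-01' "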
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
def rename_mvs_id_column(col, names, types):
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
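# Illustrative example (hypothetical ids): a raw column '12345_1' becomes 'MET001_AVG'
# when `names` maps 12345 -> 'MET001' and `types` maps '1' -> 'AVG'.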
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = pd.read_sql(sql_query_coordinates, self.conn)
masts = masts.merge(coordinates, left_on='wmm_id', right_on='wmm_id')
masts.set_index(['Project', 'wmm_id', 'WMM_Latitude', 'WMM_Longitude', 'Type'], inplace=True)
masts.sort_index(inplace=True)
return masts
def mvs_ids(self):
masts = self.masts()
mvs_ids = masts.mvs_id.values.tolist()
return mvs_ids
def valid_signal_labels(self):
signal_type_query = '''
SELECT [MDVT_ID]
,[MDVT_Name]
FROM [M2D2_DB_BE].[dbo].[MDataValueType]'''
signal_types = pd.read_sql(signal_type_query, self.conn, index_col='MDVT_Name').MDVT_ID
return signal_types
def column_labels_for_masts(self):
masts = self.masts()
mvs_ids = masts.mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def column_labels_for_data_from_mvs_ids(self, data):
masts = self.masts()
names_map = pd.Series(index=masts.mvs_id.values, data=masts.Name.values).to_dict()
types = self.valid_signal_labels()
types.loc['FLAG'] = 'Flag'
types_map = pd.Series(index=types.values.astype(str), data=types.index.values).to_dict()
data = data.rename(lambda x: rename_mvs_id_column(x, names=names_map, types=types_map), axis=1)
return data
def column_labels_for_wmm_id(self, wmm_id):
masts = self.masts()
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def data_from_sensors_mvs_ids(self, mvs_ids, signal_type='AVG'):
'''Download sensor data from M2D2
:Parameters:
mvs_ids: int or list
Virtual sensor IDs (mvs_ids) in M2D2, can be singular
signal_type: str, default 'AVG' - NOT SUPPORTED AT THIS TIME
Signal type for download
For example: 'AVG', 'SD', 'MIN', 'MAX', 'GUST'
:Returns:
out: DataFrame with signal data from virtual sensor
'''
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
valid_mvs_ids = self.mvs_ids()
assert all([mvs_id in valid_mvs_ids for mvs_id in mvs_ids]), f'One of the following is not a valid mvs_id: {mvs_ids}'
mvs_ids_list = sql_list_from_mvs_ids(mvs_ids)
sql_query= f"""
SET NOCOUNT ON
DECLARE @ColumnListID NVARCHAR(4000)
,@startDate DATETIME2
,@endDate DATETIME2
SET @ColumnListID= '{mvs_ids_list}'
SET @startDate = NULL
SET @endDate = NULL
EXECUTE [dbo].[proc_DataExport_GetDataByColumnList]
@ColumnListID
,@startDate
,@endDate
"""
data = pd.read_sql(sql_query, self.conn, index_col='CorrectedTimestamp')
data.index.name = 'stamp'
data.columns.name = 'sensor'
data = self.column_labels_for_data_from_mvs_ids(data)
return data
def data_from_mast_wmm_id(self, wmm_id):
'''Download data from all sensors on a mast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with signal data from each virtual sensor on the mast
'''
masts = self.masts()
wmm_ids = masts.index.get_level_values('wmm_id').sort_values().unique().tolist()
assert wmm_id in wmm_ids, f'the following is not a valid wmm_id: {wmm_id}'
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.values.tolist()
data = self.data_from_sensors_mvs_ids(mvs_ids)
return data
def metadata_from_mast_wmm_id(self, wmm_id):
'''Download mast metadata from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with mast metadata
'''
sql_query= '''
SELECT [WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]
WHERE wmm_id = {}
'''.format(wmm_id)
mast_metadata = pd.read_sql(sql_query, self.conn)
return mast_metadata
def mast_from_wmm_id(self, wmm_id):
'''Download an.MetMast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: an.MetMast with data and metadata from M2D2
'''
print(f'Downloading Mast {wmm_id} from M2D2')
data = self.data_from_mast_wmm_id(wmm_id=wmm_id)
metadata = self.metadata_from_mast_wmm_id(wmm_id=wmm_id)
mast = an.MetMast(data=data,
name=wmm_id,
lat=metadata.WMM_Latitude[0],
lon=metadata.WMM_Longitude[0],
elev=metadata.WMM_Elevation[0])
return mast
def masts_from_project(self, project):
'''Download an.MetMasts from M2D2 for a given project
:Parameters:
project_name: str
Project name in M2D2
:Returns:
out: List of an.MetMasts with data and metadata from M2D2 for a given project
'''
masts = self.masts()
projects = masts.index.get_level_values('Project').unique().tolist()
assert project in projects, f'Project {project} not found in M2D2'.format(project)
wmm_ids = masts.loc[project,:].index.get_level_values('wmm_id').sort_values().unique().tolist()
masts = [self.mast_from_wmm_id(wmm_id) for wmm_id in wmm_ids]
return masts
# Define Turbine class
class Turbine(object):
'''Class to connect to EDF Wind Turbine database
'''
def __init__(self):
        '''Data structure for connecting to and downloading data from the Turbine database. Convention is:
import anemoi as an
turb_db = an.io.database.Turbine()
:Parameters:
:Returns:
out: an.Turbine object connected to Turbine database
'''
self.database = 'Turbine'
server = '10.1.15.53'
db = 'Turbine_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def metadata(self):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_turbines = '''
SELECT [TUR_Manufacturer]
,[TUR_RatedOutputkW]
,[TPC_MaxOutput]
,[TUR_RotorDia]
,[TUR_Model]
,[AllHubHeights]
,[TPC_DocumentDate]
,[TUR_ID]
,[IECClass]
,[TPG_ID]
,[TPG_Name]
,[TPC_ID]
,[TVR_VersionName]
,[TPC_dbalevel]
,[TPC_TIScenario]
,[TPC_BinType]
,[TTC_ID]
,[TRPMC_ID]
,[P_ID]
,[P_Name]
FROM [Turbine_DB_BE].[NodeEstimate].[AllPowerCurves]
WHERE TPC_Type = 'Manufacturer General Spec'
'''
turbines = pd.read_sql(sql_query_turbines, self.conn)
return turbines
def power_curve_from_tpc_id(self, tpc_id):
        '''Get turbine power curve for a given power curve ID (tpc_id)'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
        sql_query_power_curve = '''
SELECT TPCD_AirDensity,
TPCD_WindSpeedBin,
TPCD_OutputKW
FROM TPCDETAILS
WHERE TPC_id = {} AND TPCD_IsDeleted = 0;
'''.format(tpc_id)
        power_curve = pd.read_sql(sql_query_power_curve, self.conn)
        return power_curve
def trust_curve_from_ttc_id(self, ttc_id):
        '''Get turbine thrust curve for a given thrust curve ID (ttc_id)'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_thrust_curve = '''
SELECT TTCD_AirDensity,
TTCD_WindSpeedBin,
TTCD_ThrustValue
FROM TTCDETAILS
WHERE TTC_id = {} AND TTCD_IsDeleted = 0;
'''.format(ttc_id)
thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
return thrust_curve
# Define Padre class
class Padre(object):
'''Class to connect to PRE Padre database
'''
def __init__(self, database='PADREScada', conn_str=None, conn=None, domino=False):
'''Data structure with both database name and connection string.
:Parameters:
database: string, default None
Name of the padre database to connect to
conn_str: string, default None
SQL connection string needed to connect to the database
conn: object, default None
SQL connection object to database
'''
self.database = database
if self.database == 'PADREScada':
server = '10.1.106.44'
db = 'PADREScada'
elif self.database == 'PadrePI':
server = '10.1.106.44'
db = 'PADREScada'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str
try:
self.conn = pyodbc.connect(self.conn_str)
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def assets(self, project=None, turbines_only=False):
'''Returns:
DataFrame of all turbines within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
sql_query_assets = '''
SELECT [AssetKey]
,Projects.[ProjectName]
,[AssetType]
,[AssetName]
,Turbines.[Latitude]
,Turbines.[Longitude]
,[elevation_mt]
FROM [PADREScada].[dbo].[Asset] as Turbines
WITH (NOLOCK)
INNER JOIN [PADREScada].[dbo].[Project] as Projects on Turbines.ProjectKey = Projects.ProjectKey
'''
assets = pd.read_sql(sql_query_assets, self.conn)
assets.set_index(['ProjectName', 'AssetName'], inplace=True)
assets.sort_index(axis=0, inplace=True)
if turbines_only:
assets = assets.loc[assets.AssetType == 'Turbine', :]
assets.drop('AssetType', axis=1, inplace=True)
if project is not None:
assets = assets.loc[project, :]
return assets
def operational_projects(self):
'''Returns:
List of all projects within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
padre_project_query = """
SELECT [ProjectKey]
,[ProjectName]
,[State]
,[NamePlateCapacity]
,[NumGenerators]
,[latitude]
,[longitude]
,[DateCOD]
FROM [PADREScada].[dbo].[Project]
WHERE technology = 'Wind'"""
projects = pd.read_sql(padre_project_query, self.conn)
projects.set_index('ProjectName', inplace=True)
return projects
def turbine_categorizations(self, category_type='EDF'):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
        padre_category_query = """
SELECT [CategoryKey]
,[StringName]
FROM [PADREScada].[dbo].[Categories]
WHERE CategoryType = '%s'""" %category_type
        categories = pd.read_sql(padre_category_query, self.conn)
categories.set_index('CategoryKey', inplace=True)
return categories
def QCd_turbine_data(self, asset_key):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT [TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Ambient_Temperature]
,[IEC Category]
,[EDF Category]
,[Expected Power (kW)]
,[Expected Energy (kWh)]
,[EnergyDelta (kWh)]
,[EnergyDelta (MWh)]
FROM [PADREScada].[dbo].[vw_10mDataBI]
WITH (NOLOCK)
WHERE [assetkey] = %i''' %asset_key
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_data(self, asset_key, start_date=None, end_date=None):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Nacelle_Direction]
,[Average_Blade_Pitch]
,[Minimum_Blade_Pitch]
,[Maximum_Blade_Pitch]
,[Average_Rotor_Speed]
,[Minimum_Rotor_Speed]
,[Maximum_Rotor_Speed]
,[Average_Ambient_Temperature]
,coalesce([IECStringKey_Manual]
,[IECStringKey_FF]
,[IECStringKey_Default]) IECKey
,coalesce([EDFStringKey_Manual]
,[EDFStringKey_FF]
,[EDFStringKey_Default]) EDFKey
,coalesce([State_and_Fault_Manual]
,[State_and_Fault_FF]
,[State_and_Fault]) State_and_Fault
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {} {}'''.format(asset_key, return_between_date_query_string(start_date, end_date))
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_expected_energy(self, asset_key):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Expected_Power_NTF]
,[Expected_Energy_NTF]
,[Expected_Power_RefMet]
,[Expected_Energy_RefMet]
,[Expected_Power_Uncorr]
,[Expected_Energy_Uncorr]
,[Expected_Power_DensCorr]
,[Expected_Energy_DensCorr]
,[Expected_Power_AvgMet]
,[Expected_Energy_AvgMet]
,[Expected_Power_ProxyWTGs]
,[Expected_Energy_ProxyWTGs]
,[Expected_Power_MPC]
,[Expected_Energy_MPC]
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {}'''.format(asset_key)
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def senvion_event_logs(self, project_id):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
sql_query = '''
SELECT [assetkey]
,[TimeStamp]
,[statuscode]
,[incomingphasingoutreset]
FROM [PADREScada].[dbo].[SenvionEventLog]
WHERE projectkey = {} and incomingphasingoutreset != 'Reset'
ORDER BY assetkey, TimeStamp
'''.format(project_id)
event_log = pd.read_sql(sql_query, self.conn)
return event_log
def ten_min_energy_by_status_code(self, project_id, start_date, end_date, padre_NTF=True):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
if padre_NTF:
padre_power_col = 'Expected_Power_NTF'
else:
padre_power_col = 'Expected_Power_DensCorr'
padre_project_query = '''
SELECT [TimeStampLocal]
,[AssetKey]
,[Average_Active_Power]
,[{}]
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [projectkey] = {} {}
ORDER BY TimeStampLocal, AssetKey'''.format(padre_power_col, project_id, return_between_date_query_string(start_date, end_date))
data_ten_min = pd.read_sql(padre_project_query, self.conn).set_index(['TimeStampLocal', 'AssetKey'])
data_ten_min.columns = ['power_active','power_expected']
data_ten_min = data_ten_min.groupby(data_ten_min.index).first()
data_ten_min.index = pd.MultiIndex.from_tuples(data_ten_min.index)
data_ten_min.index.names = ['Stamp', 'AssetKey']
return data_ten_min
def senvion_ten_min_energy_by_status_code(self, project_id, status_codes=[6680.0, 6690.0, 6697.0, 15000.0]):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
projects = self.operational_projects()
project = projects.loc[projects.ProjectKey == project_id].index.values[0]
if project in ['<NAME>','<NAME>','St. <NAME>']:
padre_NTF = False
else:
padre_NTF = True
event_log = self.senvion_event_logs(project_id=project_id)
event_log_icing = event_log.loc[event_log.statuscode.isin(status_codes), :]
incoming = event_log_icing.loc[event_log_icing.incomingphasingoutreset == 'incoming', ['assetkey', 'statuscode', 'TimeStamp']].reset_index(drop=True)
outgoing = event_log_icing.loc[event_log_icing.incomingphasingoutreset == 'phasing out', 'TimeStamp'].reset_index(drop=True)
status = pd.concat([incoming, outgoing], axis=1).dropna()
status.columns = ['asset_key', 'status_code', 'start', 'end']
status['start_ten_min'] = status.start.apply(lambda dt: datetime(dt.year, dt.month, dt.day, dt.hour,10*(dt.minute // 10)))
status['end_ten_min'] = status.end.apply(lambda dt: datetime(dt.year, dt.month, dt.day, dt.hour,10*(dt.minute // 10)))
status_start_date = status.loc[:,['start_ten_min','end_ten_min']].min().min()
status_end_date = status.loc[:,['start_ten_min','end_ten_min']].max().max()
stamp = pd.date_range(start=status_start_date, end=status_end_date, freq='10T')
icing_flags_cols = pd.MultiIndex.from_product([status.asset_key.unique(), status_codes], names=['AssetKey', 'Flag'])
icing_flags = pd.DataFrame(index=stamp, columns=icing_flags_cols)
for col in icing_flags.columns:
asset_key = col[0]
icing_flag = col[1]
icing_flags.loc[status.loc[(status.asset_key==asset_key)&(status.status_code==icing_flag),'start_ten_min'],pd.IndexSlice[asset_key,icing_flag]] = 1.0
icing_flags.loc[status.loc[(status.asset_key==asset_key)&(status.status_code==icing_flag), 'end_ten_min'],pd.IndexSlice[asset_key,icing_flag]] = 0.0
icing_flags.fillna(method='ffill', inplace=True)
icing_flags.fillna(0, inplace=True)
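        # Note on the flag construction above: each 10-minute bucket where an
        # icing status code becomes active is marked 1.0 and each bucket where
        # it clears is marked 0.0; the forward fill then carries the active
        # flag across the stamps in between, and any stamps before the first
        # logged event default to 0.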
icing_flags.index.name = 'Stamp'
data_power = self.ten_min_energy_by_status_code(project_id=project_id, start_date=status_start_date, end_date=status_end_date, padre_NTF=padre_NTF)
data_power = data_power.reset_index().pivot(index='Stamp', columns='AssetKey')
data_power.columns = data_power.columns.swaplevel()
data_ten_min = pd.concat([data_power, icing_flags], axis=1).sort_index(axis=0).dropna()
return data_ten_min
def monthly_energy_by_status_code(self, project_id, start_date, end_date, padre_NTF=True):
if not self.is_connected('PADREScada'):
            raise ValueError('Need to connect to Padre to retrieve monthly energy data. Use anemoi.DataBase(database="Padre")')
if padre_NTF:
padre_power_col = 'Expected_Power_NTF'
else:
padre_power_col = 'Expected_Power_DensCorr'
padre_project_query = '''
SELECT [TimeStampLocal]
,[AssetKey]
,[Average_Nacelle_Wdspd]
,[Average_Ambient_Temperature]
,[Average_Active_Power]
,[State_and_Fault] as Fault_Code
,[{}]
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [projectkey] = {} {}
ORDER BY TimeStampLocal, AssetKey, Fault_Code'''.format(padre_power_col, project_id, return_between_date_query_string(start_date, end_date))
columns_sum = ['Average_Active_Power','Expected_Power_NTF','AssetKey', 'Fault_Code']
columns_count = ['Average_Active_Power', 'AssetKey', 'Fault_Code']
monthly_data = pd.read_sql(padre_project_query, self.conn).set_index('TimeStampLocal').fillna(method='ffill')
monthly_data_sum = monthly_data.loc[:,columns_sum].groupby([monthly_data.index.year, monthly_data.index.month, 'AssetKey', 'Fault_Code']).sum()
monthly_data_count = monthly_data.loc[:,columns_count].groupby([monthly_data.index.year, monthly_data.index.month, 'AssetKey', 'Fault_Code']).count()
monthly_data = pd.concat([monthly_data_sum, monthly_data_count], axis=1)/6.0
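        # Dividing by 6 converts the 10-minute records to hourly units: the
        # summed average-power values become energy (kW x 1/6 h) and the row
        # counts become hours of data.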
monthly_data.columns = ['Average_Energy','Expected_Energy_NTF','Hours']
monthly_data.index.names = ['Year', 'Month', 'AssetKey', 'FaultCode']
return monthly_data
def site_production_data(self, project):
site_data = []
turbines = self.turbines(project).loc[:, 'AssetKey'].values
for i, turbine in enumerate(turbines):
            print('{} of {} turbines downloaded'.format(i+1, len(turbines)))
turbine_data = self.turbine_data(turbine)
site_data.append(turbine_data)
site_data = pd.concat(site_data, axis=1, keys=turbines)
site_data.columns.names = ['Turbine', 'Signal']
site_data.sort_index(axis=1, inplace=True)
return site_data
def meter_data(self, project):
if not self.is_connected('PadrePI'):
            raise ValueError('Need to connect to PadrePI to retrieve meter data. Use anemoi.DataBase(database="PadrePI")')
meter_data_query = """
SELECT
p.NamePlateCapacity/1000.0 AS NamePlateCapcity,
p.NumGenerators,
bopa.bop_asset_type AS [BoPAssetType],
bct.Time,
bct.Average_Power,
bct.Average_Reactive_Power,
bct.Range_Produced_Energy,
bct.Snapshot_Produced_Energy,
bct.Range_Consumed_Energy,
bct.Snapshot_Consumed_Energy
FROM dbo.BoPCriticalTag bct
INNER JOIN dbo.BoPAsset bopa ON bopa.bopassetkey = bct.BoPAssetKey
INNER JOIN dbo.Project p ON p.ProjectKey = bopa.projectkey
WHERE bopa.BOP_Asset_Type LIKE '%meter%' and p.ProjectName = '{}'""".format(project)
meter_data = pd.read_sql(meter_data_query, self.conn)
meter_data['Time'] = pd.to_datetime(meter_data['Time'], format='%Y-%m-%d %H:%M:%S')
meter_data.set_index('Time', inplace=True)
meter_data.index.name = 'Stamp'
meter_data.sort_index(axis=0, inplace=True)
return meter_data
def operational_analysis_metadata(self):
'''Returns:
Project metadata for operational analysis
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
metadata_query = """
SELECT [ProjectName]
,[mfg]
,[TimeZone]
,[NamePlateCapacity]
,[NumGenerators]
,[latitude]
,[longitude]
,[DateCOD]
FROM [PADREScada].[dbo].[Project] with(nolock)
WHERE [PADREScada].[dbo].[Project].[technology] = 'Wind' and
[PADREScada].[dbo].[Project].[active] = 1
"""
metadata = pd.read_sql(metadata_query, self.conn)
metadata.set_index('ProjectName', inplace=True)
return metadata
def operational_analysis_monthly_invoiced_production(self):
'''Returns:
Project monthly invoiced production for operational analysis
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
invoiced_production_query = '''
SELECT P.[ProjectName]
,[Year]
,[Month]
,[InvoicedProduction_kWh]
FROM [PADREScada].[dbo].[ProjectInvoicedProduction] PIP with(nolock)
INNER JOIN [PADREScada].[dbo].[Project] P ON P.projectKey = PIP.ProjectKey
'''
invoiced_production = pd.read_sql(invoiced_production_query, self.conn)
invoiced_production['Day'] = 1
invoiced_production['Stamp'] = pd.to_datetime(invoiced_production[['Year','Month','Day']])
invoiced_production = invoiced_production.set_index(['ProjectName','Stamp'])
invoiced_production = invoiced_production.drop(['Year','Month','Day'], axis=1)
return invoiced_production
def operational_analysis_scada_production(self):
'''Returns:
Project monthly scada production for operational analysis
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
scada_production_query = '''
SELECT P.[ProjectName]
,[Date] Stamp
,[WTGProduction_MWh]
,[TotalExpectedProduction_MWh]
,[TotalEnergyDelta_MWh]
,[FullPerformanceEnergyDelta_MWH]
,[PartialPerformanceDegradedEnergyDelta_MWH]
,[PartialPerformanceDeratedEnergyDelta_MWH]
,[PartialPerformanceExtCurtailment_MWH]
,[PartialPerformanceEnvironmentEnergyDelta_MWH]
,[ForcedOutageEnergyDelta_MWH]
,[SchedMaintenanceEnergyDelta_MWH]
,[CorrectiveActionsEnergyDelta_MWH]
,[TechnicalStandbyEnergyDelta_MWH]
,[RequestedShutdownEnergyDelta_MWH]
,[RequestedShutdownExtCurtailmentEnergyDelta_MWH]
,[OutOfElectricalSpecEnergyDelta_MWH]
,[OutOfEnvironmentalSpecEnergyDelta_MWH]
,[CalmWindsEnergyDelta_MWH]
,[HighWindsEnergyDelta_MWH]
,[ForceMajeureEnergyDelta_MWH]
,[UnclassifiedDowntimeEnergyDelta_MWH]
FROM [PADREScada].[dbo].[ProjectCalcDataDaily] PCDD with(nolock)
INNER JOIN [PADREScada].[dbo].[Project] P ON P.projectKey = PCDD.ProjectKey
'''
scada_production = | pd.read_sql(scada_production_query, self.conn, parse_dates=['Stamp']) | pandas.read_sql |
'''
Key take-away: feature engineering is important. Garbage in = Garbage Out
'''
from cleanData import cleanData
import time
import sys
plotBool = int(sys.argv[1]) if len(sys.argv)>1 else 0
resampleDataBool = int(sys.argv[2]) if len(sys.argv)>2 else 1
MISelectorBool = int(sys.argv[3]) if len(sys.argv)>3 else 0
start = time.time()
data,dataPreCovid,dataPostCovid = cleanData(verbose=0)
end = time.time()
print('Time: Data Extraction: {} seconds'.format(end - start))
'''
Import libraries needed
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
## General regression and classification functions: validation
from regressionLib import splitCV, plotBetaAccuracy
from regressionLib import confusionMatrix, metrics
from regressionLib import flatten
## Exploration and cluster analysis
from sklearn.cluster import KMeans,MeanShift
from regressionLib import corrMatrix, corrMatrixHighCorr
## Models
from sklearn.linear_model import LogisticRegression,Perceptron
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
## Plots
from regressionLib import plotPredictorVsResponse
'''
Data Dictionaries
'''
## Only select predictors highly correlated with severity
print('Correlation with severity')
def predictorsCorrelatedWithTarget(data):
correlation = [1]
for i in range(1,len(data.columns)):
correlation.append(np.corrcoef(data[[data.columns[0],data.columns[i]]].T)[0,1])
correlation = np.array(correlation)
sortedCorr = np.sort(np.abs(correlation))
sortedCorrIdx = np.argsort(np.abs(correlation))
cols = list(data.columns[sortedCorrIdx[sortedCorr>0.05]]) ## at least 5% correlation needed
return cols
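# Purely illustrative aside (not part of the original pipeline): assuming every
# column of `data` is numeric and 'Severity' is its first column, a roughly
# equivalent pandas-only selection would be:
#   corr = data.corr()[data.columns[0]].abs()
#   cols = list(corr[corr > 0.05].sort_values().index)
# The explicit loop above returns the surviving columns ordered by increasing
# absolute correlation, with the target itself appearing last.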
def prepDataForTraining(data):
predictorColNames = list(data.columns)
predictorColNames.remove('Severity')
X = np.array(data[predictorColNames])
targetColNames = ['Severity']
Y = np.array(data['Severity'])
dataDict = {'X':X,
'Y':Y,
'predictorNames':predictorColNames,
'targetName':targetColNames}
return dataDict
#################################################################################################################
# ### TEMP CODE: DELETE LATER
# dataDict = prepDataForTraining(data)
# dataDictPreCovid = prepDataForTraining(dataPreCovid)
# dataDictPostCovid = prepDataForTraining(dataPostCovid)
# # Correlation matrix: ALL VARIABLES
# if plotBool == 0:
# predictors = pd.DataFrame(dataDict['X'], columns=dataDict['predictorNames'])
# fig = corrMatrixHighCorr(predictors)
# fig.savefig('Plots/CorrMatrixHighThreshRAW.svg')
# fig = corrMatrix(predictors)
# fig.savefig('Plots/CorrMatrixRAW.svg')
# predictorsPreCovid = pd.DataFrame(dataDictPreCovid['X'], columns=dataDictPreCovid['predictorNames'])
# fig = corrMatrixHighCorr(predictorsPreCovid)
# fig.savefig('Plots/CorrMatrixHighThreshPreCovidRAW.svg')
# fig = corrMatrix(predictorsPreCovid)
# fig.savefig('Plots/CorrMatrixPreCovidRAW.svg')
# predictorsPostCovid = pd.DataFrame(dataDictPostCovid['X'], columns=dataDictPostCovid['predictorNames'])
# fig = corrMatrixHighCorr(predictorsPostCovid)
# fig.savefig('Plots/CorrMatrixHighThreshPostCovidRAW.svg')
# fig = corrMatrix(predictorsPostCovid)
# fig.savefig('Plots/CorrMatrixPostCovidRAW.svg')
# #################################################################################################################
dataDict = prepDataForTraining(data[predictorsCorrelatedWithTarget(data)])
dataDictPreCovid = prepDataForTraining(dataPreCovid[predictorsCorrelatedWithTarget(dataPreCovid)])
dataDictPostCovid = prepDataForTraining(dataPostCovid[predictorsCorrelatedWithTarget(dataPostCovid)])
## Mutual information between selected predictors and target
# Mutual information: MI(X,Y) = D_KL( P(X,Y) || P(X) x P(Y) )
from sklearn.feature_selection import mutual_info_classif
def mutualInfoPredictorsTarget(dataDict):
MI = mutual_info_classif(dataDict['X'],dataDict['Y'])
return ['{}: {}'.format(name,MI[i]) for i,name in enumerate(dataDict['predictorNames']) ]
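# Quick illustrative sanity check (toy data only, not used below): for a
# perfectly informative binary feature the estimated MI should be close to the
# label entropy ln(2) ~= 0.693 nats, e.g.
#   X_toy = np.random.randint(0, 2, size=(1000, 1))
#   y_toy = X_toy.ravel()
#   mutual_info_classif(X_toy, y_toy, discrete_features=True)  # ~0.693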
if MISelectorBool != 0:
print('Mutual Information: data\n{}\n'.format( mutualInfoPredictorsTarget(dataDict) ) )
print('Mutual Information: dataPreCovid\n{}\n'.format( mutualInfoPredictorsTarget(dataDictPreCovid) ) )
print('Mutual Information: dataPostCovid\n{}\n'.format( mutualInfoPredictorsTarget(dataDictPostCovid) ) )
if resampleDataBool != 0:
from regressionLib import resampleData
dataDict = resampleData(dataDict)
dataDictPreCovid = resampleData(dataDictPreCovid)
dataDictPostCovid = resampleData(dataDictPostCovid)
'''
Correlation matrix: Features
'''
if plotBool != 0:
predictors = pd.DataFrame(dataDict['X'], columns=dataDict['predictorNames'])
fig = corrMatrixHighCorr(predictors)
fig.savefig('Plots/CorrMatrixHighThreshfeat.svg')
fig = corrMatrix(predictors)
fig.savefig('Plots/CorrMatrixfeat.svg')
predictorsPreCovid = pd.DataFrame(dataDictPreCovid['X'], columns=dataDictPreCovid['predictorNames'])
fig = corrMatrixHighCorr(predictorsPreCovid)
fig.savefig('Plots/CorrMatrixHighThreshPreCovidfeat.svg')
fig = corrMatrix(predictorsPreCovid)
fig.savefig('Plots/CorrMatrixPreCovidfeat.svg')
predictorsPostCovid = pd.DataFrame(dataDictPostCovid['X'], columns=dataDictPostCovid['predictorNames'])
fig = corrMatrixHighCorr(predictorsPostCovid)
fig.savefig('Plots/CorrMatrixHighThreshPostCovidfeat.svg')
fig = corrMatrix(predictorsPostCovid)
fig.savefig('Plots/CorrMatrixPostCovidfeat.svg')
# #############################################################################
# sys.exit("Just wanted correlation matrices lol")
# #############################################################################
## Initial model selection study: using testTrain split and credible intervals, binomial significance
'''
Training models: Base model
'''
XTrain,XTest,YTrain,YTest,idxTrain,idxTest = splitCV(dataDict['X'],
dataDict['Y'],
returnIdx=True).testTrain(testRatio=0.05)
XTrainPreCovid,XTestPreCovid,YTrainPreCovid,YTestPreCovid,idxTrainPreCovid,idxTestPreCovid = splitCV(dataDictPreCovid['X'],
dataDictPreCovid['Y'],
returnIdx=True).testTrain(testRatio=0.05)
XTrainPostCovid,XTestPostCovid,YTrainPostCovid,YTestPostCovid,idxTrainPostCovid,idxTestPostCovid = splitCV(dataDictPostCovid['X'],
dataDictPostCovid['Y'],
returnIdx=True).testTrain(testRatio=0.05)
'''
Train Models and Test: Draw beta distribution of accuracy.
## base model: logistic regression (location 0)
## All multiclass classifiers are declared here and fit(), predict() methods form sklearn model classes are used
'''
Mdls = {'MdlName': ['Logistic Regression',
'Random Forest: Bootstrap Aggregation',
'Random Forest: AdaBoost',
'Neural Network: 3 hidden layers, 50 hidden units'],
'Mdl': [ LogisticRegression(max_iter=5000) ,
RandomForestClassifier(n_estimators=100,criterion='entropy',max_depth=10,min_samples_leaf=100,min_samples_split=150,bootstrap=True),
AdaBoostClassifier(base_estimator = DecisionTreeClassifier(criterion='entropy',max_depth=5) , n_estimators=50, learning_rate=1),
MLPClassifier(hidden_layer_sizes=(50,50,50,), alpha=0.1 , max_iter=2000, activation = 'logistic', solver='adam') ],
'Predictions': np.zeros(shape=(4,),dtype='object'),
'Confusion Matrix': np.zeros(shape=(4,),dtype='object') }
MdlsPreCovid = {'MdlName': ['Logistic Regression: Pre-Covid',
'Random Forest: Bootstrap Aggregation: Pre-Covid',
'Random Forest: AdaBoost: Pre-Covid',
                            'Neural Network: 3 hidden layers, 50 hidden units'],
'Mdl':[LogisticRegression(max_iter=5000) ,
RandomForestClassifier(n_estimators=100,criterion='entropy',max_depth=10,min_samples_leaf=100,min_samples_split=150,bootstrap=True),
AdaBoostClassifier(base_estimator = DecisionTreeClassifier(criterion='entropy',max_depth=5) , n_estimators=50, learning_rate=1),
MLPClassifier(hidden_layer_sizes=(50,50,50,), alpha=0.1 , max_iter=2000, activation = 'logistic', solver='adam') ],
'Predictions': np.zeros(shape=(4,),dtype='object'),
'Confusion Matrix': np.zeros(shape=(4,),dtype='object') }
MdlsPostCovid = {'MdlName': ['Logistic Regression: Post-Covid',
'Random Forest: Bootstrap Aggregation: Post-Covid',
'Random Forest: AdaBoost: Post-Covid',
                             'Neural Network: 3 hidden layers, 50 hidden units'],
'Mdl':[LogisticRegression(max_iter=5000) ,
RandomForestClassifier(n_estimators=100,criterion='entropy',max_depth=10,min_samples_leaf=100,min_samples_split=150,bootstrap=True),
AdaBoostClassifier(base_estimator = DecisionTreeClassifier(criterion='entropy',max_depth=5) , n_estimators=50, learning_rate=1),
MLPClassifier(hidden_layer_sizes=(50,50,50,), alpha=0.1 , max_iter=2000, activation = 'logistic', solver='adam') ],
'Predictions': np.zeros(shape=(4,),dtype='object'),
'Confusion Matrix': np.zeros(shape=(4,),dtype='object') }
## Fit sklearn models
def fitTestModel(Mdl,MdlName,XTrain,YTrain,XTest,YTest,saveLocation=None):
start = time.time()
Mdl.fit(XTrain, YTrain)
end = time.time()
print('Time: {}: {} seconds'.format(MdlName,end - start) )
pred = []
for i in range(XTest.shape[0]):
pred.append(Mdl.predict(XTest[i].reshape(1,-1)))
pred = np.array(pred).reshape(YTest.shape)
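    # The row-by-row loop above produces the same result as a single
    # Mdl.predict(XTest) call for these fitted sklearn estimators; it is
    # written this way only to score one test instance at a time.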
accuracy = np.mean(pred == YTest)
print('Accuracy: {}'.format(accuracy) )
if type(saveLocation)!=type(None):
plotBetaAccuracy(accuracy,XTest.shape[0],saveLocation)
else:
plotBetaAccuracy(accuracy,XTest.shape[0])
cMatrix = confusionMatrix(classificationTest = pred,
Ytest = pd.Series(YTest))
overallAccuracy, userAccuracy, producerAccuracy, kappaCoeff = metrics(cMatrix)
print('Overall Accuracy: {}'.format(np.round(overallAccuracy,3)))
print("User's Accuracy: {}".format(np.round(userAccuracy,3)))
print("Producer's Accuracy: {}".format(np.round(producerAccuracy,3)))
print('Kappa Coefficient: {}\n'.format(np.round(kappaCoeff,6)))
print('########################################################\n')
return Mdl,pred,cMatrix
def cMatrixPlots(cMatrixList,YTest,MdlNames):
## DO NOT CALL THIS FUNCTION IN SCRIPT. Use it only in jupyter to plot confusion matrices
fig,axs = plt.subplots(nrows=2,ncols=np.ceil(len(cMatrixList)/2).astype(int),figsize=(3*len(cMatrixList),8))
ax = axs.reshape(-1)
cMatrixLabels = list( | pd.Series(YTest) | pandas.Series |
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
import app.charts.constants as con
def render_sen_by_cat(df):
st.subheader("Sentiment by Category")
cat_by_sen = dict(zip(con.kpis, [[], [], [], []]))
for col in con.kpis:
for sen in con.sentiments:
total = len(df[df[col] == 1])
df_tmp = df[df[col] == 1]
num_sen = len(df_tmp[df_tmp["p_sentiment"] == sen])
percentage = 0
if num_sen != 0:
percentage = round(num_sen / total * 100, 1)
cat_by_sen[col].append(percentage)
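    # At this point cat_by_sen maps each KPI column to a list of per-sentiment
    # percentages (summing to ~100 per KPI, assuming every row's p_sentiment is
    # one of con.sentiments); these lists drive the stacked horizontal bars below.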
fig = go.Figure()
rows = list(cat_by_sen.values())
for i in range(0, len(con.sentiment_labels)):
for xd, yd in zip(rows, con.kpi_labels):
fig.add_trace(
go.Bar(
x=[xd[i]],
y=[yd],
orientation="h",
marker=dict(
color=con.colors_sentiment[i],
line=dict(color="rgb(248, 248, 249)", width=1),
),
)
)
fig.update_layout(
xaxis=dict(
showgrid=False,
showline=False,
showticklabels=False,
zeroline=False,
domain=[0.1, 1],
),
yaxis=dict(
showgrid=False,
showline=False,
showticklabels=False,
zeroline=False,
),
barmode="stack",
height=con.chart_height,
paper_bgcolor=con.paper_bgcolor,
plot_bgcolor=con.paper_bgcolor,
margin=con.chart_margins,
showlegend=False,
)
annotations = []
for yd, xd in zip(con.kpi_labels, rows):
# labeling the y-axis
annotations.append(
dict(
xref="paper",
yref="y",
x=0.1,
y=yd,
xanchor="right",
text=str(yd),
font=dict(family="Arial", size=14, color="rgb(67, 67, 67)"),
showarrow=False,
align="right",
)
)
# labeling the first percentage of each bar (x_axis)
annotations.append(
dict(
xref="x",
yref="y",
x=xd[0] / 2,
y=yd,
text=str(xd[0]) + "%",
font=dict(family="Arial", size=14, color="rgb(248, 248, 255)"),
showarrow=False,
)
)
# labeling the first Likert scale (on the top)
if yd == con.kpi_labels[-1]:
annotations.append(
dict(
xref="x",
yref="paper",
x=xd[0] / 2,
y=-0.1,
text=con.sentiment_labels[0],
font=dict(family="Arial", size=14, color="rgb(67, 67, 67)"),
showarrow=False,
)
)
space = xd[0]
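        # 'space' accumulates the width of the segments drawn so far, so each
        # subsequent label is centred over its own segment of the stacked bar.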
for i in range(1, len(xd)):
# labeling the rest of percentages for each bar (x_axis)
annotations.append(
dict(
xref="x",
yref="y",
x=space + (xd[i] / 2),
y=yd,
text=str(xd[i]) + "%",
font=dict(family="Arial", size=14, color="rgb(248, 248, 255)"),
showarrow=False,
)
)
# labeling the Likert scale
if yd == con.kpi_labels[-1]:
annotations.append(
dict(
xref="x",
yref="paper",
x=space + (xd[i] / 2),
y=-0.1,
text=con.sentiment_labels[i],
font=dict(family="Arial", size=14, color="rgb(67, 67, 67)"),
showarrow=False,
)
)
space += xd[i]
fig.update_layout(annotations=annotations)
st.plotly_chart(fig, use_container_width=True)
def render_sen_by_time(df):
st.subheader("Sentiment (Across Months)")
sen_by_month = dict()
for i, month in enumerate(con.months):
df_tmp = df.loc[( | pd.to_datetime(df["dt"]) | pandas.to_datetime |
import datetime
import logging
import pandas as pd
import os
import sys
from .processors import LocustResourceProcessor
class PreProcessor:
RESOURCES = ['LOCUST']
def __init__(self, resource, time_formatter, *args, **kwargs):
if resource not in PreProcessor.RESOURCES:
logging.critical(
'Invalid Usage: Please assign a resource defined in '
+ 'PreProcessor.RESOURCES.')
sys.exit(1)
if resource == 'LOCUST':
if 'distribution_filename' in kwargs \
and 'requests_filename' in kwargs:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor(
distribution_filename=kwargs['distribution_filename'],
requests_filename=kwargs['requests_filename'])
elif 'distribution_filename' in kwargs:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor(
distribution_filename=kwargs['distribution_filename'])
elif 'requests_filename' in kwargs:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor(
requests_filename=kwargs['requests_filename'])
else:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor()
self.time_formatter = time_formatter
def process(self, reports_path):
"""Performance Report as pandas DataFrame.
        Args:
            reports_path: directory containing per-run directories, each of \
                which includes locust reports.
        Returns:
            reports [pandas.DataFrame]: Having performance test reports and \
                following columns.
                1. Name: test target.
                2. # requests: number of requests.
                3. 99%: 99%tile Latency. any %tile Latency is available \
                    because you have to assign key when plotting charts.
                4. Median response time: 50%tile Latency.
                5. Average response time: ditto.
                6. Min response time: ditto.
                7. Max response time: ditto.
                8. # failures: number of failures.
                9. Requests/s: requests per second.
                10. DateTime [pandas.TimeStamp]: date the test was executed.
"""
report_dirs = [f for f in os.listdir(reports_path) if os.path.isdir(
os.path.join(reports_path, f))]
reports_df = None
for report_dir in report_dirs:
tmp_df = self._process(reports_path, report_dir)
if reports_df is None:
reports_df = tmp_df
else:
reports_df = | pd.concat([reports_df, tmp_df], ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import statsmodels.api as sm
from collections import namedtuple
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.arima.model import ARIMA
from scipy.linalg import toeplitz
ModelWithResults = namedtuple("ModelWithResults", ["model", "alg", "inference_dataframe"])
"""
Fixtures for a number of models available in statsmodels
https://www.statsmodels.org/dev/api.html
"""
def ols_model(**kwargs):
# Ordinary Least Squares (OLS)
np.random.seed(9876789)
nsamples = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x ** 2))
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsamples)
X = sm.add_constant(X)
y = np.dot(X, beta) + e
ols = sm.OLS(y, X)
model = ols.fit(**kwargs)
return ModelWithResults(model=model, alg=ols, inference_dataframe=X)
def failing_logit_model():
X = pd.DataFrame(
{
"x0": np.array([2.0, 3.0, 1.0, 2.0, 20.0, 30.0, 10.0, 20.0]),
"x1": np.array([2.0, 3.0, 1.0, 2.0, 20.0, 30.0, 10.0, 20.0]),
},
columns=["x0", "x1"],
)
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
# building the model and fitting the data
log_reg = sm.Logit(y, X)
model = log_reg.fit()
return ModelWithResults(model=model, alg=log_reg, inference_dataframe=X)
def get_dataset(name):
dataset_module = getattr(sm.datasets, name)
data = dataset_module.load()
data.exog = np.asarray(data.exog)
data.endog = np.asarray(data.endog)
return data
def gls_model():
# Generalized Least Squares (GLS)
data = get_dataset("longley")
data.exog = sm.add_constant(data.exog)
ols_resid = sm.OLS(data.endog, data.exog).fit().resid
res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
rho = res_fit.params
order = toeplitz(np.arange(16))
sigma = rho ** order
gls = sm.GLS(data.endog, data.exog, sigma=sigma)
model = gls.fit()
return ModelWithResults(model=model, alg=gls, inference_dataframe=data.exog)
def glsar_model():
# Generalized Least Squares with AR covariance structure
X = range(1, 8)
X = sm.add_constant(X)
Y = [1, 3, 4, 5, 8, 10, 9]
glsar = sm.GLSAR(Y, X, rho=2)
model = glsar.fit()
return ModelWithResults(model=model, alg=glsar, inference_dataframe=X)
def wls_model():
# Weighted Least Squares
Y = [1, 3, 4, 5, 2, 3, 4]
X = range(1, 8)
X = sm.add_constant(X)
wls = sm.WLS(Y, X, weights=list(range(1, 8)))
model = wls.fit()
return ModelWithResults(model=model, alg=wls, inference_dataframe=X)
def recursivels_model():
# Recursive Least Squares
dta = sm.datasets.copper.load_pandas().data
dta.index = pd.date_range("1951-01-01", "1975-01-01", freq="AS")
endog = dta.WORLDCONSUMPTION
# To the regressors in the dataset, we add a column of ones for an intercept
exog = sm.add_constant(
dta[["COPPERPRICE", "INCOMEINDEX", "ALUMPRICE", "INVENTORYINDEX"]] # pylint: disable=E1136
)
rls = sm.RecursiveLS(endog, exog)
model = rls.fit()
inference_dataframe = pd.DataFrame([["1951-01-01", "1975-01-01"]], columns=["start", "end"])
return ModelWithResults(model=model, alg=rls, inference_dataframe=inference_dataframe)
def rolling_ols_model():
# Rolling Ordinary Least Squares (Rolling OLS)
from statsmodels.regression.rolling import RollingOLS
data = get_dataset("longley")
exog = sm.add_constant(data.exog, prepend=False)
rolling_ols = RollingOLS(data.endog, exog)
model = rolling_ols.fit(reset=50)
return ModelWithResults(model=model, alg=rolling_ols, inference_dataframe=exog)
def rolling_wls_model():
# Rolling Weighted Least Squares (Rolling WLS)
from statsmodels.regression.rolling import RollingWLS
data = get_dataset("longley")
exog = sm.add_constant(data.exog, prepend=False)
rolling_wls = RollingWLS(data.endog, exog)
model = rolling_wls.fit(reset=50)
return ModelWithResults(model=model, alg=rolling_wls, inference_dataframe=exog)
def gee_model():
# Example taken from
# https://www.statsmodels.org/devel/examples/notebooks/generated/gee_nested_simulation.html
np.random.seed(9876789)
p = 5
groups_var = 1
level1_var = 2
level2_var = 3
resid_var = 4
n_groups = 100
group_size = 20
level1_size = 10
level2_size = 5
n = n_groups * group_size * level1_size * level2_size
xmat = np.random.normal(size=(n, p))
# Construct labels showing which group each observation belongs to at each level.
    groups_ix = np.kron(np.arange(n // group_size), np.ones(group_size)).astype(int)
    level1_ix = np.kron(np.arange(n // level1_size), np.ones(level1_size)).astype(int)
    level2_ix = np.kron(np.arange(n // level2_size), np.ones(level2_size)).astype(int)
# Simulate the random effects.
groups_re = np.sqrt(groups_var) * np.random.normal(size=n // group_size)
level1_re = np.sqrt(level1_var) * np.random.normal(size=n // level1_size)
level2_re = np.sqrt(level2_var) * np.random.normal(size=n // level2_size)
# Simulate the response variable
y = groups_re[groups_ix] + level1_re[level1_ix] + level2_re[level2_ix]
y += np.sqrt(resid_var) * np.random.normal(size=n)
# Put everything into a dataframe.
df = pd.DataFrame(xmat, columns=["x%d" % j for j in range(p)])
df["y"] = y + xmat[:, 0] - xmat[:, 3]
df["groups_ix"] = groups_ix
df["level1_ix"] = level1_ix
df["level2_ix"] = level2_ix
# Fit the model
cs = sm.cov_struct.Nested()
dep_fml = "0 + level1_ix + level2_ix"
gee = sm.GEE.from_formula(
"y ~ x0 + x1 + x2 + x3 + x4", cov_struct=cs, dep_data=dep_fml, groups="groups_ix", data=df
)
model = gee.fit()
return ModelWithResults(model=model, alg=gee, inference_dataframe=df)
def glm_model():
# Generalized Linear Model (GLM)
data = get_dataset("scotland")
data.exog = sm.add_constant(data.exog)
glm = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
model = glm.fit()
return ModelWithResults(model=model, alg=glm, inference_dataframe=data.exog)
def glmgam_model():
# Generalized Additive Model (GAM)
from statsmodels.gam.tests.test_penalized import df_autos
x_spline = df_autos[["weight", "hp"]]
bs = sm.gam.BSplines(x_spline, df=[12, 10], degree=[3, 3])
alpha = np.array([21833888.8, 6460.38479])
gam_bs = sm.GLMGam.from_formula(
"city_mpg ~ fuel + drive", data=df_autos, smoother=bs, alpha=alpha
)
model = gam_bs.fit()
return ModelWithResults(model=model, alg=gam_bs, inference_dataframe=df_autos)
def arma_model():
# Autoregressive Moving Average (ARMA)
np.random.seed(12345)
arparams = np.array([1, -0.75, 0.25])
maparams = np.array([1, 0.65, 0.35])
nobs = 250
y = arma_generate_sample(arparams, maparams, nobs)
dates = pd.date_range("1980-1-1", freq="M", periods=nobs)
y = pd.Series(y, index=dates)
arima = ARIMA(y, order=(2, 0, 2), trend="n")
model = arima.fit()
inference_dataframe = | pd.DataFrame([["1999-06-30", "2001-05-31"]], columns=["start", "end"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
pd.options.display.max_columns = None
class WaterQualityAssessor:
#initialize
def __init__(self, df, cols):
'''
Inputs
- df: pandas.DataFrame
- cols: list, list of analytical columns
'''
self.df = df
self.cols = cols
self.epa_standards = {'Alkalinity (mg/L HCO3)': 100, 'Aluminum (mg/L)': 0.05, 'Ammonia (mg/L)': 0.25,
'Antimony (mg/L)': 0.006, 'Arsenic (mg/L)': 0.01, 'Barium (mg/L)': 2, 'Beryllium (mg/L)': 0.004,
'Boron (mg/L)': 3, 'Bromine (mg/L)': 6, 'Bismuth (mg/L)': 0.01, 'Cadmium (mg/L)': 0.005,
'Calcium (mg/L)': 50, 'Cobalt (mg/L)': 0.006, 'Cerium (mg/L)': 0.0221, 'Chromium (mg/L)': 0.1,
'Chloride (mg/L)': 250, 'Copper (mg/L)': 1.0, 'Dysprosium (mg/L)': 0.00015, 'DOC (mg/L)': 2,
'Europium (mg/L)': 0.0001, 'Erbium (mg/L)': 0.000285, 'Fluoride (mg/L)': 4.0, 'pH': 8.5,
'Gadolinium (mg/L)': 0.000218, 'Gallium (mg/L)': 0.00001, 'Holmium (mg/L)': 0.000055, 'Iron (mg/L)': 0.3,
'Lead (mg/L)': 0.015, 'Lithium (mg/L)': 0.04, 'Lanthanum (mg/L)':0.001749, 'Lutetium (mg/L)': 0.000169,
'Mercury (mg/L)': 0.002, 'Magnesium (mg/L)': 10, 'Manganese (mg/L)': 0.3, 'Molybdenum (mg/L)': 0.04,
'Nitrate (mg/L)': 10, 'Nickel (mg/L)': 0.1,'Neodymium (mg/L)': 0.0018, 'Praseodymium (mg/L)': 0.000071,
'Potassium (mg/L)': 165.6, 'Phosphorus (mg/L)': 0.1, 'Rubidium (mg/L)': 0.0006,'Selenium (mg/L)': 0.05,
'Silica (mg/L)': 3.3, 'Sodium (mg/L)': 20, 'Samarium (mg/L)': 0.000094,'Tin (mg/L)': .010,
'Titanium (mg/L)': 0.0005, 'Thallium (mg/L)': 0.002, 'Terbium (mg/L)': 0.000088, 'Tungsten (mg/L)': 0.0002,
'Thulium (mg/L)': 0.00022, 'Sulfate (mg/L)': 250, 'Sulfur (mg/L)': 250, 'Strontium (mg/L)': 4,
'Silver (mg/L)': 0.1, 'TDS (mg/L)': 500, 'TSS (mg/L)': 155, 'TDN (mg/L)': 1, 'Uranium (mg/L)': 0.03,
'Vanadium (mg/L)': 0.015, 'Yttrium (mg/L)': 0.001188, 'Ytterbium (mg/L)': 0.000195, 'Zinc (mg/L)': 5}
def cleanup(self):
'''
Function to remove NaN's and special characters from dataframe
Inputs:
- Unedited dataframe
Output:
- Returns dataframe with no special characters or NaN's
'''
# Removes special characters from attributes
#cols = self.df.columns
special_character = ['<','>','--', 'ND', '\xa0']
#Make dictionary of old name and new name
newname = {}
for col in self.cols:
new = col
#<add your logic checks on col here>
for sc in special_character:
new = new.replace(sc,' ')
#Strip double spaces
            while '  ' in new:
                new = new.replace('  ', ' ')
            new = new.strip()
newname[col] = new
self.df.rename(columns=newname,inplace=True)
self.cols = list(newname.values()) #double check this<<
# Converts all strings to floating-point numbers
self.df[self.cols] = self.df[self.cols].apply(pd.to_numeric,errors='coerce')
def same_units(self):
'''
Function that checks units of the water quality samples and returns dataframe with data displayed in units of mg/L
Only works for µg/L - for other units, multiply dataframe values by the appropriate conversion factor
Inputs:
- Cleaned up DataFrame of water quality samples
Output:
- Returns DataFrame with samples in units of mg/L and sample names sorted in alphabetical order
'''
# Replaces µg/L with mg/L and performs appropriate unit conversion
# To convert other units to mg/L, change the unit conversion factor in the code below
newcols = self.cols.copy()
for col in self.cols:
if 'µg/L' in str(col):
self.df[col.replace('µg/L','mg/L')] = self.df[col] * 0.001
newcols.remove(col)
newcols.append(col.replace('µg/L','mg/L'))
self.cols = newcols
new_df = self.df.filter(regex='mg/L')
return new_df.sort_index(axis=1)
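    # Illustrative note (assumed factors, extend as needed): the same pattern
    # handles other mass-concentration units by swapping the 0.001 factor, e.g.
    #   ng/L -> mg/L : multiply by 1e-6
    #   g/L  -> mg/L : multiply by 1e3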
def assessor(self):
'''
Function that loops through all samples in dataframe and compares values to maximum contaminant level (MCL)
established by the EPA, then displays the samples that measured higher than its corresponding MCL.
Inputs:
- DataFrame of water quality samples
Output:
- Assessment of sample values compared to corresponding MCL of that metal
'''
for index, row in self.df.iterrows():
for col in self.cols:
                if pd.isna(row[col]):
                    continue
                try:
                    self.df.at[index,col+'T'] = row[col] >= self.epa_standards[col]
                except KeyError:
                    # No EPA standard defined for this column; skip it
                    continue
# Remove NaN's from dataframe
self.df.fillna('', inplace=True)
def print_exce(self):
'''
        Function that lists the exceedances by frequency of occurrence in descending order.
Inputs:
- DataFrame of water quality samples
Output:
- Returns ranked list of samples in descending order according to the exceedance frequency
'''
global list_exce
list_exce = []
for index, row in self.df.iterrows():
for col in self.cols: #iterrate through columns
if row[col+'T']:
list_exce.append([index, col, round(row[col], 5)]) #adjust as needed
df_exce = | pd.DataFrame(list_exce, columns=['Sample_Number','Sample_Name','Concentration (mg/L)']) | pandas.DataFrame |
# Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pickle
from pathlib import Path
import inspect
import pandas as pd
import numpy as np
from moonshot import Moonshot, MoonshotML
from moonshot.cache import TMP_DIR
from quantrocket.exceptions import ImproperlyConfigured
from sklearn.tree import DecisionTreeClassifier
class HistoricalPricesCacheTestCase(unittest.TestCase):
def test_10_complain_if_houston_not_set(self):
"""
Tests that a "HOUSTON_URL not set" error is raised if a backtest is
run without mock. This is a control for later tests.
"""
# clear cache dir if any pickles are hanging around
files = glob.glob("{0}/moonshot_*.pkl".format(TMP_DIR))
for file in files:
os.remove(file)
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
with self.assertRaises(ImproperlyConfigured) as cm:
BuyBelow10().backtest()
self.assertIn("HOUSTON_URL is not set", repr(cm.exception))
def test_20_load_history_from_mock(self):
"""
Runs a strategy using mock to fill the history cache.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
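            # The mocked prices mimic the panel layout returned by get_prices:
            # a (Field, Date) MultiIndex on the rows and one column per Sid.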
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
# Volume
5000,
16000,
8800,
9900
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
# Volume
15000,
14000,
28800,
17000
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = BuyBelow10().backtest(end_date="2018-05-04")
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
0.0,
0.0,
1.0],
"FI23456": [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.0],
"FI23456": ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]}
)
def test_30_load_history_from_cache(self):
"""
Re-Runs the strategy without using mock to show that the history
cache is used.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
results = BuyBelow10().backtest(end_date="2018-05-04")
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
0.0,
0.0,
1.0],
"FI23456": [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.0],
"FI23456": ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]}
)
def test_40_dont_use_cache_if_different_params(self):
"""
Re-runs the strategy without using mock and specifying different DB
parameters so as not to use the cache, which should trigger
ImproperlyConfigured.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
DB_FIELDS = ["Open"]
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
with self.assertRaises(ImproperlyConfigured) as cm:
BuyBelow10().backtest(end_date="2018-05-04")
self.assertIn("HOUSTON_URL is not set", repr(cm.exception))
def test_50_dont_use_cache_if_no_cache(self):
"""
Re-runs the strategy without using mock and with the same DB
parameters but with no_cache=True, which should not use the cache and
thus should trigger ImproperlyConfigured.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
with self.assertRaises(ImproperlyConfigured) as cm:
BuyBelow10().backtest(end_date="2018-05-04", no_cache=True)
self.assertIn("HOUSTON_URL is not set", repr(cm.exception))
def test_60_use_cache_if_end_date_and_db_modified(self):
"""
Tests that if an end date is specified, the cache is used, even
though we pretend that the db was modified after the file was cached.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
def mock_list_databases(**kwargs):
return {
"postgres": [],
"sqlite": [{'last_modified': "2015-01-01T13:45:00",
'name': 'quantrocket.history.my-db1.sqlite',
'path': '/var/lib/quantrocket/quantrocket.history.my-db1.sqlite',
'size_in_mb': 3.1},
# Database was recently modified (in future)
{'last_modified': ( | pd.Timestamp.now() | pandas.Timestamp.now |
#!/usr/bin/env python3
# =========================================================================== #
# BASE #
# =========================================================================== #
# =========================================================================== #
# Project: Visualate #
# Version: 0.1.0 #
# File: \base.py #
# --------------- #
# Author: <NAME> #
# Company: Decision Scients #
# Email: <EMAIL> #
# --------------- #
# Create Date: Wednesday November 27th 2019, 10:28:47 am #
# Last Modified: Wednesday November 27th 2019, 12:51:57 pm #
# Modified By: <NAME> (<EMAIL>) #
# --------------- #
# License: Modified BSD #
# Copyright (c) 2019 Decision Scients #
# =========================================================================== #
"""Base class and interface for all Visualators"""
import os
import time
from abc import ABC, abstractmethod, ABCMeta
from itertools import chain
import math
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as po
from plotly.subplots import make_subplots
from sklearn.base import BaseEstimator
from ..supervised_learning.training.estimator import Estimator
from ..supervised_learning.regression import LinearRegression
from ..utils.model import get_model_name
from ..utils.misc import snake
from ..utils.file_manager import save_plotly
# --------------------------------------------------------------------------- #
# PlotKwargs #
# --------------------------------------------------------------------------- #
class PlotKwargs():
"""
Keyword Parameters
------------------
kwargs : dict
Keyword arguments that define the plot layout for visualization subclasses.
=========== ========== ===============================================
Property Format Description
----------- ---------- -----------------------------------------------
height int the height in pixels for the figure
line_color str default line color
margin dict margin in pixels. dict keys are l, t, and b
template str the theme template
test_alpha float opacity for objects associated with test data
test_color str color for objects associated with test data
train_alpha float opacity for objects associated with training data
train_color str color for objects associated with training data
width int the width in pixels of the figure
=========== ========== ===============================================
"""
# --------------------------------------------------------------------------- #
# BASE VISUALATOR #
# --------------------------------------------------------------------------- #
class BaseVisualator(ABC, BaseEstimator, metaclass=ABCMeta):
"""Abstact base class at the top of the visualator object hierarchy.
Class defines the interface for creating, storing and rendering
visualizations using Plotly.
Parameters
----------
title : str
The title for the plot. It defaults to the plot name and optionally
the model name.
kwargs : see docstring for PlotKwargs class.
Notes
-----
There are four types of visualization subclasses: DataVisualator,
ModelVisualator, GroupVisualator, and CrossVisualator. The DataVisualator
is used to analyze data prior to model building and selection.
The ModelVisualator renders visualizations for a single model.
GroupVisualator accepts a list of Visualator objects and delivers
visualizations using subplots. The CrossVisualator wraps a
Scikit-Learn GridSearchCV or RandomizedSearchCV object and presents
model selection visualizations. Those inherit directly from this class.
"""
DEFAULT_PARAMETERS = {'height': 450, 'line_color': 'darkgrey',
'margin': {'l':80, 't':100, 'b':80}, 'template': "none",
'test_alpha': 0.75, 'test_color': '#9fc377',
'train_alpha': 0.75, 'train_color': '#0272a2',
'width': 700}
ARRAY_LIKE = (np.ndarray, np.generic, list, pd.Series, \
pd.DataFrame, tuple)
NUMERIC_DATA_TYPES = ['int16', 'int32', 'int64', 'float16', \
'float32', 'float64']
CATEGORICAL_DATA_TYPES = ['category', 'object']
def __init__(self, name, title=None, **kwargs):
self.name = name
self.title = title
self.height = kwargs.get('height', self.DEFAULT_PARAMETERS['height'])
self.width = kwargs.get('width', self.DEFAULT_PARAMETERS['width'])
self.line_color = kwargs.get('line_color', \
self.DEFAULT_PARAMETERS['line_color'])
self.train_color = kwargs.get('train_color', \
self.DEFAULT_PARAMETERS['train_color'])
self.test_color = kwargs.get('test_color', \
self.DEFAULT_PARAMETERS['test_color'])
self.train_alpha = kwargs.get('train_alpha', \
self.DEFAULT_PARAMETERS['train_alpha'])
        self.test_alpha = kwargs.get('test_alpha', \
self.DEFAULT_PARAMETERS['test_alpha'])
self.template = kwargs.get('template', \
self.DEFAULT_PARAMETERS['template'])
self.margin = kwargs.get('margin', \
self.DEFAULT_PARAMETERS['margin'])
self.filetype = ".html"
@abstractmethod
def fit(self, X, y, **kwargs):
""" Fits the visualator to the data.
For DataVisualator classes, this method fits the data to the visualator.
        For ModelVisualator classes, the fit method fits the data to an underlying
        model. GroupVisualators iteratively fit several models to the data. The
        CrossVisualators call the fit methods on GridSearchCV and RandomizedSearchCV
objects.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs: dict
Keyword arguments passed to the scikit-learn API.
See visualizer specific details for how to use
the kwargs to modify the visualization or fitting process.
Returns
-------
self : visualator
"""
pass
@abstractmethod
def show(self, path=None, **kwargs):
"""Renders the visualization.
Contains the Plotly code that renders the visualization
in a notebook or in a pop-up GUI. If the path variable
is not None, the visualization will be saved to disk.
Subclasses will override with visualization specific logic.
Parameters
----------
path : str
The relative directory and file name to which the visualization
will be saved.
kwargs : dict
Various keyword arguments
"""
pass
def save(self, fig, directory, filename):
"""Saves a plot to file.
Parameters
----------
fig : plotly figure object
The figure to be saved
directory : str
The name of the directory to which the file is to be saved
filename : str
The name of the file to be saved.
"""
save_plotly(fig, directory=directory, filename=filename)
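# Illustrative sketch only (hypothetical subclass, not part of this module): a
# concrete visualator just has to implement the two abstract methods, e.g.
#
#   class ScatterVisualator(BaseVisualator):
#       def fit(self, X, y, **kwargs):
#           self.X_, self.y_ = X, y
#           return self
#       def show(self, path=None, **kwargs):
#           fig = go.Figure(go.Scatter(x=self.X_[:, 0], y=self.y_, mode='markers'))
#           fig.update_layout(title=self.title, template=self.template,
#                             height=self.height, width=self.width)
#           po.plot(fig)
#           return fig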
# --------------------------------------------------------------------------- #
# DATA VISUALATOR #
# --------------------------------------------------------------------------- #
class DataVisualator(BaseVisualator):
"""Abstact base class for data visualators.
Class defines the interface for creating Plotly visualizations of data
prior to model building and selection.
Parameters
----------
name : str
The name of the data DataVisualator object. Should be lower snake
case, containing alphanumeric characters and underscores for
separation.
title : str, Optional
The title for the plot. It defaults to the plot name and optionally
the model name.
kwargs : see docstring for PlotKwargs class.
"""
def __init__(self, name, title=None, **kwargs):
"""Instantiate the object and specify data input requirements."""
super(DataVisualator, self).__init__(name=name, title=title,
**kwargs)
def _get_object_names(self, x):
"""Gets (col) names from pandas or numpy object or returns None."""
if isinstance(x, pd.DataFrame):
names = x.columns
elif isinstance(x, pd.Series):
names = x.name
elif isinstance(x, (np.generic, np.ndarray)):
names = x.dtype.names
else:
names = None
return names
def _generate_variable_names(self, x, target=False):
"""Generates a list of variable names based upon shape of x."""
if target:
var_names = ['target']
elif len(x.shape) == 1:
var_names = ['var_0']
elif x.shape[1] == 1:
var_names = ['var_0']
else:
var_names = ["var_" + str(i) for i in range(x.shape[1])]
return var_names
def _get_variable_names(self, x, target=False, **kwargs):
"""Gets variable names from object or generate a dummy name."""
# Obtain variable names from kwargs if available
var_names = None
if target:
var_names = kwargs.get('target_name', None)
else:
var_names = kwargs.get('feature_names', None)
if isinstance(var_names, self.ARRAY_LIKE):
return var_names
# Ok, try extracting variable names from the objects themselves
var_names = self._get_object_names(x)
if isinstance(var_names, self.ARRAY_LIKE):
return var_names
# Alright, let's create dummy variable names since none are available.
var_names = self._generate_variable_names(x, target)
return var_names
def _reformat(self, x, target=False, **kwargs):
"""Reformats data into a dataframe."""
var_names = self._get_variable_names(x, target, **kwargs)
if isinstance(x, pd.DataFrame):
return x
else:
return | pd.DataFrame(data=x, columns=var_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
import sys
import tensorflow.keras as k
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest
from sklearn import decomposition
from tqdm import tqdm
from scipy.stats import mode
def binarize_labels(labels):
"""
Change the labels to binary
:param labels: np array of digit labels
:return:
"""
labels = np.where(labels == 0, labels, 1)
return labels
def create_window_generator(window, batch_size, train_x, train_y, test_x, test_y, prediction_mode):
"""
Create a TF generator for sliding window
:param batch_size:
:param test_y:
:param test_x:
:param train_y:
:param train_x:
:param window:
:param prediction_mode:
:return:
"""
train_generator = k.preprocessing.sequence.TimeseriesGenerator(train_x, train_y,
length=window,
batch_size=batch_size)
test_generator = k.preprocessing.sequence.TimeseriesGenerator(test_x, test_y,
length=window,
batch_size=batch_size)
return train_generator, test_generator
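# Illustrative usage (assumed shapes): with length=window each generator batch
# yields x of shape (batch_size, window, n_features) and y holding the label of
# the sample immediately following each window, e.g.
#   train_gen, test_gen = create_window_generator(50, 32, train_x, train_y,
#                                                 test_x, test_y, prediction_mode=None)
#   x_batch, y_batch = train_gen[0]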
def create_new_targets(window, data):
"""
Create the new targets for window classification. The new label will be the most common one in the whole window.
:param window:
:param data:
:return:
"""
new_data = np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=1, arr=(rolling_window(data.astype(int), 0, window)))
return new_data
def delete_padded_rows(data, labels, n_dimensions):
"""
Delete padded rows from the samples and return continuous data
:param labels:
:param data:
:param n_dimensions:
:return:
"""
labels = np.repeat(labels, data.shape[1])
data = data.reshape(-1, n_dimensions)
    padded_rows = np.all(data == 0, axis=1)
    data = data[~padded_rows]
    labels = labels[~padded_rows]
return data, labels
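# Note: rows that are exactly zero in every dimension are treated as padding by
# delete_padded_rows, so upstream padding should use a value that cannot occur
# in real observations; a genuine all-zero sample row would be dropped as well.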
def drop_columns(df):
"""
:param df:
:return:
"""
df = df.reset_index()
df = df.drop(columns=['timestamp',
'output_int_register_25',
'output_int_register_26',
'output_bit_register_64',
'output_bit_register_65',
'output_bit_register_66',
'output_bit_register_67'], axis=1)
# Make it multiindex
df['event'] = df.index
df = df.set_index(['sample_nr', 'event'])
df = df.reset_index('event', drop=True)
df = df.set_index(df.groupby(level=0).cumcount().rename('event'), append=True)
df = df.sort_index()
return df
def filter_samples(df, normal_samples, damaged_samples, assembly_samples, missing_samples, damaged_thread_samples,
loosening_samples, move_samples):
"""
Take the requested percentage of each data type
:param df: df, data
:param normal_samples: float, percentage of normal samples to take
:param damaged_samples: float, percentage of damaged samples to take
:param assembly_samples: float, percentage of assembly samples to take
:param missing_samples: float, percentage of missing samples to take
:param damaged_thread_samples: float, percentage of damaged thread hole samples to take
:param loosening_samples: float, percentage of loosening samples to take
    :param move_samples: float, percentage of movement samples to take
:return: df, the filtered data
"""
# Count the sample types
count_df = df.groupby(['sample_nr'])['label'].median()
unique, counts = np.unique(count_df, return_counts=True)
labels_count_dict = {A: B for A, B in zip(unique, counts)}
# Take only the amount of samples that's needed to fill the requirement
sampled_list = []
for label in labels_count_dict:
subindex = list(np.unique(df.loc[df['label'] == label].index.get_level_values(0)))
if label == 0:
to_take = normal_samples * labels_count_dict[0]
elif label == 1:
to_take = damaged_samples * labels_count_dict[1]
elif label == 2:
to_take = assembly_samples * labels_count_dict[2]
elif label == 3:
to_take = missing_samples * labels_count_dict[3]
elif label == 4:
to_take = damaged_thread_samples * labels_count_dict[4]
elif label == 5:
to_take = loosening_samples * labels_count_dict[5]
elif label == 6:
to_take = move_samples * labels_count_dict[6]
sample_ids = np.random.choice(subindex, int(to_take), replace=False)
sampled_df = df[df.index.get_level_values(0).isin(sample_ids)]
sampled_list.append(sampled_df)
taken_data = pd.concat(sampled_list, ignore_index=False).sort_values(['sample_nr', 'event'])
# Reset the sample numbers
taken_data = taken_data.reset_index()
taken_data['sample_nr'] = (taken_data['sample_nr'] != taken_data['sample_nr'].shift(1)).astype(int).cumsum()
taken_data['event'] = taken_data.index
taken_data = taken_data.set_index(['sample_nr', 'event'])
taken_data = taken_data.reset_index('event', drop=True)
taken_data = taken_data.set_index(taken_data.groupby(level=0).cumcount().rename('event'), append=True)
taken_data = taken_data.sort_index()
return taken_data
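# Usage sketch (illustrative, not part of the original module): keep every
# normal sample but only 10 % of the movement samples; the fractions are
# arbitrary examples.
#
#   filtered = filter_samples(df, normal_samples=1.0, damaged_samples=1.0,
#                             assembly_samples=1.0, missing_samples=1.0,
#                             damaged_thread_samples=1.0,
#                             loosening_samples=1.0, move_samples=0.1)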
def load_dataset(path):
"""
Load data from the file
    :param path: path to the data
:return: pd dataframes, train & test data
"""
if '.h5' in str(path):
dataframe = pd.read_hdf(path)
elif '.pkl' in str(path):
        dataframe = pd.read_pickle(path)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unofficial python API to datashake reviews API
(https://www.datashake.com/review-scraper-api)
This module makes it easier to schedule jobs and fetch the results
Official web API documentation: https://api.datashake.com/#reviews
You need to have datashake API key to use this module
Authors:
<NAME> (<EMAIL>)
"""
import time
import math
import re
import datetime
import json
import requests
import pandas as pd
def _prepare_date(from_date):
"""
Private function to prepare from_date by converting
it to YYYY-MM-DD format.
"""
# check if from_date was provided and if it was provided in the right
# format
from_date_str = None
if from_date is not None:
if not isinstance(from_date, str):
try:
from_date_str = from_date.strftime('%Y-%m-%d')
except AttributeError:
raise ValueError(
f"""from_date must be a string in the format YYYY-MM-DD
or datetime. String provided: {from_date}"
"""
)
else:
# regex template for YYYY-MM-DD
pattern = re.compile("\\d{4}-\\d{2}-\\d{2}")
match = pattern.match(from_date)
if match is None:
raise ValueError(
f"""from_date must be a string in the format YYYY-MM-DD \
or datetime. String provided: {from_date}"
"""
)
from_date_str = from_date[0:10]
return from_date_str
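# Illustrative sketch (not part of the original module): _prepare_date accepts
# either a YYYY-MM-DD string or a datetime-like object and normalises both to
# the same string form; the dates below are arbitrary examples.
def _example_prepare_date():
    import datetime
    assert _prepare_date("2021-09-28") == "2021-09-28"
    assert _prepare_date(datetime.date(2021, 9, 28)) == "2021-09-28"
    assert _prepare_date(None) is None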
class APIConnectionError(Exception):
"""Exception to handle errors while connecting to API"""
class APIResponseError(Exception):
"""Exception to handle errors received from API"""
class DatashakeReviewAPI:
"""
Class to manage Datashake Review API (https://api.datashake.com/#reviews)
    Parameters
    ----------
api_key : str, 40-symbol api key for Datashake Reviews.
Must be obtained on their website
    max_requests_per_second : number of requests allowed to be sent to
the API service per second.
Introduced to avoid 429 status code (Too Many Requests)
Link to the Datashake doc: https://api.datashake.com/#rate-limiting
language_code : str, default='en'. Language code of the reviews.
allow_response : boolean, default=True
min_days_since_last_crawl : int, default=3 - the number of days
that need to pass since the last crawl to launch another one
"""
def __init__(self, api_key, max_requests_per_second=10,
language_code='en', allow_response=True,
min_days_since_last_crawl=3):
self.api_key = str(api_key)
if len(self.api_key) != 40:
raise ValueError(f"""api_key must be 40 symbols long, \
the key provided was {len(self.api_key)} symbols long"\
""")
self.max_requests_per_second = max_requests_per_second
self.language_code = str(language_code)
self.allow_response = str(allow_response)
self.min_days_since_last_crawl = min_days_since_last_crawl
        # setting up hidden attributes
self.__time_counter = 0 # counts in seconds
self.__requests_done = 0
self.reviews_per_page = 500
def __check_load_and_wait(self):
"""
Hidden method to check workload of requests to API
and wait to ensure the number of requests
sent to API stays within the threshold
Attribute max_requests_per_second regulates the behaviour
of this method.
More info here: https://api.datashake.com/#rate-limiting
"""
if self.__time_counter == 0:
self.__time_counter = time.perf_counter()
elif (time.perf_counter() - self.__time_counter) > 1.0:
self.__time_counter = time.perf_counter()
self.__requests_done = 1
elif self.__requests_done < self.max_requests_per_second:
self.__requests_done += 1
else:
wait_secs = 1.0 - (time.perf_counter() - self.__time_counter) + 0.1
print(f'API overload risk, waiting for {wait_secs} seconds')
time.sleep(wait_secs)
self.__requests_done = 1
self.__time_counter = time.perf_counter()
def get_job_status(self, job_id):
"""
Returns the status of the scheduled review job
Parameters
----------
        job_id : str, identifier of the scheduled job
Returns
-------
Dictionary with the job status results. Example:
{'success': True,
'status': 200,
'job_id': 278171040,
'source_url': 'https://uk.trustpilot.com/review/uk.iqos.com',
'source_name': 'trustpilot',
'place_id': None,
'external_identifier': None,
'meta_data': None,
'unique_id': None,
'review_count': 3400,
'average_rating': 4.5,
'last_crawl': '2021-09-28',
'crawl_status': 'complete',
'percentage_complete': 100,
'result_count': 3401,
'credits_used': 3409,
'from_date': '2017-01-01',
'blocks': None}
"""
url = "https://app.datashake.com/api/v2/profiles/info"
querystring = {"job_id": str(job_id)}
headers = {
'spiderman-token': self.api_key,
}
self.__check_load_and_wait()
response = requests.request("GET", url, headers=headers,
params=querystring)
if response.ok is False:
error_str = 'API Connection Error. '
error_str += f"""Error code: {response.status_code} - \
{response.reason}. URL: {url}"""
raise APIConnectionError(error_str)
if response.json()['success'] is False:
error_str = 'API Response Error. '
error_str += f"{response.text}. Job ID: {job_id}. URL: {url}"
raise APIResponseError(error_str)
return response.json()
def get_job_reviews(self, job_id, from_date=None):
"""
        Return the job status and the reviews scraped within the specified job
        if the job is finished.
        If the job is not finished, the review results will be empty.
Parameters
----------
        job_id : str, identifier of the job that was scheduled to
scrape the reviews.
from_date : str or datetime, optional. If not provided, all reviews
will be queried.
If from date was provided while scheduling the job you can't get
any reviews before that date with this method.
Returns
-------
tuple containing:
dictionary with the job_status from the API
pandas Dataframe with reviews
"""
from_date_str = _prepare_date(from_date)
        df_reviews = pd.DataFrame()
import numpy as np
import pandas as pd
import pytest
from sklearn import metrics
from epiquark import ScoreCalculator
def test_non_case_imputation(shared_datadir, paper_example_score: ScoreCalculator) -> None:
cases = pd.read_csv(shared_datadir / "paper_example/cases_long.csv")
imputed = paper_example_score._impute_non_case(cases)
imputed_expected = pd.read_csv(shared_datadir / "paper_example/non_case_imputed_long.csv")
    pd.testing.assert_frame_equal(imputed, imputed_expected, check_dtype=False)
"""
Utility functions for I/O.
Written by <NAME> and <NAME>,
Gradient Institute Ltd. (<EMAIL>).
Copyright © 2020 Monetary Authority of Singapore
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pathlib
from os import path
import pandas as pd
COVARIATES_FILE = "model_inputs.csv"
SENSITIVE_FILE = "sensitive_attributes.csv"
OUTCOMES_FILE = "outcomes.csv"
TRUTH_FILE = "truth.csv"
INDEX_LABEL = "ID"
def load_data():
"""Load the simulation data, run the simulation if no data exists."""
datapath = pathlib.Path(__file__).parent.absolute()
files = [COVARIATES_FILE, SENSITIVE_FILE, OUTCOMES_FILE,
TRUTH_FILE]
paths = [path.join(datapath, f) for f in files]
    data = tuple(pd.read_csv(f, index_col=INDEX_LABEL) for f in paths)
import pandas as pd
from py_expression_eval import Parser
math_parser = Parser()
def _get_mz_tolerance(qualifiers, mz):
if qualifiers is None:
return 0.1
if "qualifierppmtolerance" in qualifiers:
ppm = qualifiers["qualifierppmtolerance"]["value"]
mz_tol = abs(ppm * mz / 1000000)
return mz_tol
if "qualifiermztolerance" in qualifiers:
return qualifiers["qualifiermztolerance"]["value"]
return 0.1
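# Illustrative sketch (not part of the original module): a 10 ppm tolerance at
# m/z 500 corresponds to an absolute window of 0.005; the qualifier dictionary
# mirrors the structure the parser above expects.
def _example_mz_tolerance():
    qualifiers = {"qualifierppmtolerance": {"value": 10}}
    return _get_mz_tolerance(qualifiers, mz=500.0)  # -> 0.005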
def _get_massdefect_min(qualifiers):
if qualifiers is None:
return 0, 1
if "qualifiermassdefect" in qualifiers:
return qualifiers["qualifiermassdefect"]["min"], qualifiers["qualifiermassdefect"]["max"]
return 0, 1
def _get_minintensity(qualifier):
"""
Returns absolute min and relative min
Args:
qualifier ([type]): [description]
Returns:
[type]: [description]
"""
min_intensity = 0
min_percent_intensity = 0
min_tic_percent_intensity = 0
if qualifier is None:
min_intensity = 0
min_percent_intensity = 0
return min_intensity, min_percent_intensity, min_tic_percent_intensity
if "qualifierintensityvalue" in qualifier:
min_intensity = float(qualifier["qualifierintensityvalue"]["value"])
if "qualifierintensitypercent" in qualifier:
min_percent_intensity = float(qualifier["qualifierintensitypercent"]["value"]) / 100
if "qualifierintensityticpercent" in qualifier:
min_tic_percent_intensity = float(qualifier["qualifierintensityticpercent"]["value"]) / 100
# since the subsequent comparison is a strict greater than, if people set it to 100, then they won't get anything.
min_percent_intensity = min(min_percent_intensity, 0.99)
return min_intensity, min_percent_intensity, min_tic_percent_intensity
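# Illustrative sketch (not part of the original module): percent-style
# qualifiers are converted to fractions, and the relative minimum is capped at
# 0.99 so the later strict greater-than comparison can still match peaks.
def _example_min_intensity():
    qualifier = {"qualifierintensitypercent": {"value": 100}}
    return _get_minintensity(qualifier)  # -> (0, 0.99, 0)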
def _get_exclusion_flag(qualifiers):
if qualifiers is None:
return False
if "qualifierexcluded" in qualifiers:
return True
return False
def _set_intensity_register(ms_filtered_df, register_dict, condition):
if "qualifiers" in condition:
if "qualifierintensityreference" in condition["qualifiers"]:
qualifier_variable = condition["qualifiers"]["qualifierintensitymatch"]["value"]
grouped_df = ms_filtered_df.groupby("scan").sum().reset_index()
for grouped_scan in grouped_df.to_dict(orient="records"):
# Saving into the register
key = "scan:{}:variable:{}".format(grouped_scan["scan"], qualifier_variable)
register_dict[key] = grouped_scan["i"]
return
def _filter_intensitymatch(ms_filtered_df, register_dict, condition):
if "qualifiers" in condition:
if "qualifierintensitymatch" in condition["qualifiers"] and \
"qualifierintensitytolpercent" in condition["qualifiers"]:
qualifier_expression = condition["qualifiers"]["qualifierintensitymatch"]["value"]
qualifier_variable = qualifier_expression[0] #TODO: This assumes the variable is the first character in the expression, likely a bad assumption
grouped_df = ms_filtered_df.groupby("scan").sum().reset_index()
filtered_grouped_scans = []
for grouped_scan in grouped_df.to_dict(orient="records"):
# Reading from the register
key = "scan:{}:variable:{}".format(grouped_scan["scan"], qualifier_variable)
if key in register_dict:
register_value = register_dict[key]
evaluated_new_expression = math_parser.parse(qualifier_expression).evaluate({
qualifier_variable : register_value
})
min_match_intensity, max_match_intensity = _get_intensitymatch_range(condition["qualifiers"], evaluated_new_expression)
scan_intensity = grouped_scan["i"]
#print(key, scan_intensity, qualifier_expression, min_match_intensity, max_match_intensity, grouped_scan)
if scan_intensity > min_match_intensity and \
scan_intensity < max_match_intensity:
filtered_grouped_scans.append(grouped_scan)
else:
# Its not in the register, which means we don't find it
continue
            return pd.DataFrame(filtered_grouped_scans)
import pandas as pd
from texthero import nlp, visualization, preprocessing, representation
from . import PandasTestCase
import unittest
import string
from parameterized import parameterized
# Define valid inputs for different functions.
s_text = pd.Series(["Test"], index=[5])
s_tokenized_lists = pd.Series([["Test", "Test2"], ["Test3"]], index=[5, 6])
s_numeric = pd.Series([5.0], index=[5])
s_numeric_lists = pd.Series([[5.0, 5.0], [6.0, 6.0]], index=[5, 6])
# Define all test cases. Every test case is a list
# of [name of test case, function to test, tuple of valid input for the function].
# First argument of valid input has to be the Pandas Series where we
# want to keep the index. If this is different for a function, a separate
# test case has to implemented in the class below.
# The tests will be run by AbstractIndexTest below through the @parameterized
# decorator.
# The names will be expanded automatically, so e.g. "named_entities"
# creates test cases test_correct_index_named_entities and test_incorrect_index_named_entities.
test_cases_nlp = [
["named_entities", nlp.named_entities, (s_text,)],
["noun_chunks", nlp.noun_chunks, (s_text,)],
]
test_cases_preprocessing = [
["fillna", preprocessing.fillna, (s_text,)],
["lowercase", preprocessing.lowercase, (s_text,)],
["replace_digits", preprocessing.replace_digits, (s_text, "")],
["remove_digits", preprocessing.remove_digits, (s_text,)],
["replace_punctuation", preprocessing.replace_punctuation, (s_text, "")],
["remove_punctuation", preprocessing.remove_punctuation, (s_text,)],
["remove_diacritics", preprocessing.remove_diacritics, (s_text,)],
["remove_whitespace", preprocessing.remove_whitespace, (s_text,)],
["replace_stopwords", preprocessing.replace_stopwords, (s_text, "")],
["remove_stopwords", preprocessing.remove_stopwords, (s_text,)],
["stem", preprocessing.stem, (s_text,)],
["clean", preprocessing.clean, (s_text,)],
["remove_round_brackets", preprocessing.remove_round_brackets, (s_text,)],
["remove_curly_brackets", preprocessing.remove_curly_brackets, (s_text,)],
["remove_square_brackets", preprocessing.remove_square_brackets, (s_text,)],
["remove_angle_brackets", preprocessing.remove_angle_brackets, (s_text,)],
["remove_brackets", preprocessing.remove_brackets, (s_text,)],
["remove_html_tags", preprocessing.remove_html_tags, (s_text,)],
["tokenize", preprocessing.tokenize, (s_text,)],
["phrases", preprocessing.phrases, (s_tokenized_lists,)],
["replace_urls", preprocessing.replace_urls, (s_text, "")],
["remove_urls", preprocessing.remove_urls, (s_text,)],
["replace_tags", preprocessing.replace_tags, (s_text, "")],
["remove_tags", preprocessing.remove_tags, (s_text,)],
]
test_cases_representation = [
[
"count",
lambda x: representation.flatten(representation.count(x)),
(s_tokenized_lists,),
],
[
"term_frequency",
lambda x: representation.flatten(representation.term_frequency(x)),
(s_tokenized_lists,),
],
[
"tfidf",
lambda x: representation.flatten(representation.tfidf(x)),
(s_tokenized_lists,),
],
["pca", representation.pca, (s_numeric_lists, 0)],
["nmf", representation.nmf, (s_numeric_lists,)],
["tsne", representation.tsne, (s_numeric_lists,)],
["kmeans", representation.kmeans, (s_numeric_lists, 1)],
["dbscan", representation.dbscan, (s_numeric_lists,)],
["meanshift", representation.meanshift, (s_numeric_lists,)],
]
test_cases_visualization = []
test_cases = (
test_cases_nlp
+ test_cases_preprocessing
+ test_cases_representation
+ test_cases_visualization
)
class AbstractIndexTest(PandasTestCase):
"""
Class for index test cases. Tests for all cases
in test_cases whether the input's index is correctly
preserved by the function. Some function's tests
are implemented manually as they take different inputs.
"""
"""
Tests defined in test_cases above.
"""
@parameterized.expand(test_cases)
def test_correct_index(self, name, test_function, valid_input):
s = valid_input[0]
result_s = test_function(*valid_input)
        t_same_index = pd.Series(s.values, s.index)
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring on datetimelike looking when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
        result = Series([Timestamp("20130101"), 1], index=["a", "b"])
# Lint as: python3
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests `utils.py`."""
import functools
import os
import tempfile
import unittest
import mock
import pandas as pd
from pandas import testing as pdt
import tensorflow as tf
from tfrecorder import beam_image
from tfrecorder import constants
from tfrecorder import utils
from tfrecorder import test_utils
from tfrecorder import input_schema
from tfrecorder import dataset_loader
# pylint: disable=protected-access
class CheckTFRecordsTest(unittest.TestCase):
"""Tests `check_tfrecords`."""
def setUp(self):
"""Test setup."""
image_height = 40
image_width = 30
image_channels = 3
image_fn = functools.partial(
test_utils.make_random_image, image_height, image_width,
image_channels)
data = test_utils.get_test_data()
schema = input_schema.IMAGE_CSV_SCHEMA
image_uri_key = schema.image_uri_key
num_records = len(data[image_uri_key])
image_uris = data.pop(image_uri_key)
data['image_name'] = [os.path.split(uri)[-1] for uri in image_uris]
data.update({
'image': [beam_image.encode(image_fn())
for _ in range(num_records)],
'image_height': [image_height] * num_records,
'image_width': [image_width] * num_records,
'image_channels': [image_channels] * num_records,
})
self.tfrecord_dir = 'gs://path/to/tfrecords/dir'
self.split = 'TRAIN'
self.num_records = num_records
self.data = data
self.dataset = tf.data.Dataset.from_tensor_slices(self.data)
@mock.patch.object(dataset_loader, 'load', autospec=True)
def test_valid_records(self, mock_fn):
"""Tests valid case on reading multiple records."""
mock_fn.return_value = {self.split: self.dataset}
num_records = len(self.data['image'])
with tempfile.TemporaryDirectory(dir='/tmp') as dir_:
actual_dir = utils.inspect(
self.tfrecord_dir, split=self.split, num_records=num_records,
output_dir=dir_)
self.assertTrue('check-tfrecords-' in actual_dir)
actual_csv = os.path.join(actual_dir, 'data.csv')
self.assertTrue(os.path.exists(actual_csv))
_ = self.data.pop('image')
# Check output CSV
      actual_df = pd.read_csv(actual_csv)
'''
PipelineTranscriptDiffExpression.py - Utility functions for
pipeline_transcriptdiffexpression.py
==============================================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import cgatpipelines.tasks.expression as Expression
import cgatpipelines.tasks.counts as Counts
import cgatcore.iotools as iotools
from cgatcore import pipeline as P
from cgatcore.pipeline import cluster_runnable
from rpy2.robjects import r as R
import pandas as pd
import numpy as np
import sqlite3
import os
def connect(database, annotations_database):
'''utility function to connect to database.
Use this method to connect to the pipeline database.
Additional databases can be attached here as well.
Returns an sqlite3 database handle.
'''
dbh = sqlite3.connect(database)
statement = '''ATTACH DATABASE '%s' as annotations''' % (
annotations_database)
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
@cluster_runnable
def runSleuth(design, base_dir, model, contrasts, outfile, counts, tpm,
fdr, lrt=False, reduced_model=None):
''' run sleuth. Note: all samples in the design table must also
have a directory with the same name in `base_dir` with kallisto
results in a file called abundance.h5'''
outfile_prefix = P.snip(outfile, ".tsv")
Design = Expression.ExperimentalDesign(design)
exp = Expression.DEExperiment_Sleuth()
res = exp.run(Design, base_dir, model, contrasts, outfile_prefix,
counts, tpm, fdr, lrt, reduced_model)
res.getResults(fdr)
for contrast in set(res.table['contrast']):
res.plotMA(contrast, outfile_prefix)
res.plotVolcano(contrast, outfile_prefix)
res.table.to_csv(outfile, sep="\t", index=False)
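# Usage sketch (illustrative; the file and contrast names below are
# placeholders, not outputs of this pipeline):
#
#   runSleuth("design.tsv", "kallisto.dir", "~group", ["groupB-groupA"],
#             "sleuth_results.tsv", "counts.tsv", "tpm.tsv", fdr=0.05)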
@cluster_runnable
def runSleuthAll(samples, base_dir, counts, tpm):
''' run sleuth for all samples to obtain counts and tpm tables
Note: all samples in the design table must also
have a directory with the same name in `base_dir` with kallisto
results in a file called abundance.h5
'''
design = pd.DataFrame({
"group": ([0, 1] * ((len(samples) + 1) / 2))[0:len(samples)],
"include": [1, ] * len(samples),
"pair": [0, ] * len(samples)})
design.index = samples
Design = Expression.ExperimentalDesign(design)
exp = Expression.DEExperiment_Sleuth()
res = exp.run(Design, base_dir, counts=counts, tpm=tpm,
model="~group", dummy_run=True)
@cluster_runnable
def makeExpressionSummaryPlots(counts_inf, design_inf, logfile):
''' use the plotting methods for Counts object to make summary plots'''
with iotools.openFile(logfile, "w") as log:
plot_prefix = P.snip(logfile, ".log")
# need to manually read in data as index column is not the first column
counts = Counts.Counts(pd.read_table(counts_inf, sep="\t"))
counts.table.set_index(["transcript_id"])
design = Expression.ExperimentalDesign(design_inf)
# make certain counts table only include samples in design
counts.restrict(design)
cor_outfile = plot_prefix + "_pairwise_correlations.png"
pca_var_outfile = plot_prefix + "_pca_variance.png"
pca1_outfile = plot_prefix + "_pc1_pc2.png"
pca2_outfile = plot_prefix + "_pc3_pc4.png"
heatmap_outfile = plot_prefix + "_heatmap.png"
counts_log10 = counts.log(base=10, pseudocount=0.1, inplace=False)
counts_highExp = counts_log10.clone()
counts_highExp.table['order'] = counts_highExp.table.apply(
np.mean, axis=1)
counts_highExp.table.sort(["order"], ascending=0, inplace=True)
counts_highExp.table = counts_highExp.table.iloc[0:500, :]
counts_highExp.table.drop("order", axis=1, inplace=True)
log.write("plot correlations: %s\n" % cor_outfile)
counts_log10.plotPairwiseCorrelations(cor_outfile, subset=1000)
log.write("plot pc3,pc4: %s\n" % pca1_outfile)
counts_log10.plotPCA(design,
pca_var_outfile, pca1_outfile,
x_axis="PC1", y_axis="PC2",
colour="group", shape="group")
log.write("plot pc3,pc4: %s\n" % pca2_outfile)
counts_log10.plotPCA(design,
pca_var_outfile, pca2_outfile,
x_axis="PC3", y_axis="PC4",
colour="group", shape="group")
log.write("plot heatmap: %s\n" % heatmap_outfile)
counts_highExp.heatmap(heatmap_outfile)
@cluster_runnable
def identifyLowConfidenceTranscripts(infile, outfile):
''' identify transcripts which cannot be confidently quantified in
the simulation '''
df = pd.read_table(infile, sep="\t", index_col=0)
with iotools.openFile(outfile, "w") as outf:
outf.write("%s\t%s\n" % ("transcript_id", "reason"))
# identify transcript with low fraction of kmers - these show
# poorer correlation between ground truth and esimated counts
low_fraction = df[df['fraction_bin'] < 0.03].index.tolist()
for transcript in low_fraction:
outf.write("%s\t%s\n" % (transcript, "low_kmers"))
# identify transcript with poor accuracy of quantification
low_accuracy = df[[abs(x) > 0.585 for x in
df['log2diff_tpm']]].index.tolist()
for transcript in low_accuracy:
outf.write("%s\t%s\n" % (transcript, "poor_accuracy"))
@cluster_runnable
def mergeAbundanceCounts(infile, outfile, counts):
''' merge the abundance and simulation counts files for
each simulation '''
df_abund = pd.read_table(infile, sep="\t", index_col=0)
df_counts = pd.read_table(counts, sep="\t", index_col=0)
df_abund.columns = [x if x != "tpm" else "est_tpm"
for x in df_abund.columns]
df_merge = pd.merge(df_abund, df_counts, left_index=True, right_index=True)
df_merge.index.name = "id"
df_merge.to_csv(outfile, sep="\t")
@cluster_runnable
def calculateCorrelations(infiles, outfile, bin_step=1):
''' calculate correlation across simulation iterations per transcript'''
abund, kmers = infiles
df_abund = pd.read_table(abund, sep="\t", index_col=0)
df_kmer = pd.read_table(kmers, sep="\t", index_col=0)
# this is hacky, it's doing all against all correlations for the
# two columns and subsetting
df_agg_tpm = df_abund.groupby(level=0)[[
"est_tpm", "tpm"]].corr().ix[0::2, 'tpm']
# drop the "read_count" level, make into dataframe and rename column
df_agg_tpm.index = df_agg_tpm.index.droplevel(1)
    df_agg_tpm = pd.DataFrame(df_agg_tpm)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
take a list of frames, zip them together under the
assumption that these all have the first frames' index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
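# Hypothetical usage sketch for zip_frames (not part of the original test suite):
# with axis=1 the frames are interleaved column by column.
# >>> df1 = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
# >>> df2 = df1 * 10
# >>> zip_frames([df1, df2], axis=1).columns.tolist()
# ['A', 'A', 'B', 'B']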
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# The algorithm part
import numpy as np
import pandas as pd
import nltk
# Download required packages
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
class APIRecommender:
def __init__(self):
# Create dataframes from csv, used to retrieve relevant info
self.apis_df = pd.read_csv('../datasets/apis_processed.csv')
self.mashups_df = | pd.read_csv('../datasets/mashups_processed.csv') | pandas.read_csv |
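# A minimal sketch of the TF-IDF + cosine-similarity idea suggested by the imports
# above (the 'description' column and the query index 0 are assumptions, not taken
# from the APIRecommender source):
# vectorizer = TfidfVectorizer(stop_words='english')
# tfidf = vectorizer.fit_transform(self.apis_df['description'].fillna(''))
# sims = cosine_similarity(tfidf[0], tfidf).ravel()  # similarity of API 0 to every API
# top5 = sims.argsort()[::-1][1:6]                   # indices of the five most similar APIs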
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import os
import datetime
path_of_brandwise = 'C:\\LavaWebScraper\\BrandWiseFiles\\'
## THIS FILE IS A COMMON FORMAT FOR SCRAPING THE WEBSITES. DO NOT CHANGE THE VARIABLE NAMES ALREADY SPECIFIED IN THIS FILE.
## YOU MAY HOWEVER ADD YOUR OWN VARIABLES.
## FINAL LIST OF RESULTS SHOULD BE AVAILABLE IN A LIST 'RECORDS' NECESSARILY FOR THE SAKE OF CONSISTENCY.
############## VARIABLES NEEDED : DO NOT CHANGE THE VARIABLE NAMES. JUST FILL IN THEIR VALUES WHEREVER REQUIRED. ####################
base_url = 'https://xiaomi-mi.com/mi-smartphones/'
ur = 'https://xiaomi-mi.com'
country = 'CHINA'
company = 'XIAOMI'
model_list = []
usp = []
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
records = []
href = []
####################################################################################################################################
################ DECLARE HERE THE EXTRA VARIABLES NEEDED ###########################################################################
ii = []
shref = []
####################################################################################################################################
try:
r = requests.get(base_url)
soup = BeautifulSoup(r.text, 'html.parser')
except:
print('ERROR : BASE URL OF THE WEBSITE IS INVALID/DOWN.')
s1 = soup.find('div', class_='content').find_all('div', class_='item-title')
for s in s1:
href.append(ur + s.find('a')['href'])
model_list.append(s.text.strip().strip('\n'))
for i in range(len(model_list)):
if 'Accessories' in model_list[i]:
ii.append(i)
### REMOVING THE LINKS OF ACCESSORIES ###
x=0
while x<len(model_list):
if x in ii:
model_list.pop(x)
href.pop(x)
x = x + 1
model_list = []
########################################
for i in range(len(href)):
print('MAIN MODEL NO.: %d' %i)
r = requests.get(href[i])
soup = BeautifulSoup(r.text, 'html.parser')
s3 = soup.find_all('div', class_='item-title')
for s in s3:
#model_list.append(s.text.strip().strip('\n'))
ts = s.find('a')['href']
if 'http' not in ts:
ts = ur + ts
if 'https://nis-store.com' not in s.find('a')['href']:
shref.append(ts)
model_list.append(s.find('a').text.strip().strip('\n'))
print('---------------------------------------------------------------------------------------------------------------')
href = shref[:]
print(len(href))
for i in range(len(href)):
dd = ''
pp = ''
mm = ''
cc = ''
print(href[i])
heads = []
dets = []
print('SUB MODEL NO.: %d' %i)
r = requests.get(href[i])
soup = BeautifulSoup(r.text, 'html.parser')
try:
usp_text = soup.find('div', class_='content-text').find('p').text
usp.append(usp_text.strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' '))
except:
usp.append('Not Available')
try: ### LEAVING OUT THE MODELS
s4 = soup.find('table').find_all('tr')
for j in range(len(s4)):
s5 = s4[j].find_all('td')
heads.append(s5[0].text.strip().strip('\n'))
dets.append(s5[1].text.strip().strip('\n'))
for j in range(len(heads)):
if 'dimensions' in heads[j].lower():
thickness_list.append(dets[j].strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' '))
if 'display type' in heads[j].lower() or 'size' in heads[j].lower():
dd = dd + dets[j].strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' ') + ' || '
if 'chipset' in heads[j].lower():
pp = pp + dets[j].strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' ') + ' || '
if 'RAM' in heads[j]:
mm = mm + 'RAM : ' + dets[j].strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' ') + ' || '
if 'memory' in heads[j].lower():
mm = mm + 'ROM : ' + dets[j].strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' ') + ' || '
if 'primary camera' in heads[j].lower() or 'Primary camera' in heads[j]:
match = re.search(r'\d+\.*\d*\s*MP', dets[j])
try:
st = str(match.group())
cc = cc + 'Primary Camera : ' + st.strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' ') + ' || '
except:
pass
if 'secondary camera' in heads[j].lower() or 'Secondary camera' in heads[j]:
match = re.search(r'\d+\.*\d*\s*MP', dets[j])
try:
st = str(match.group())
cc = cc + 'Secondary Camera : ' + st.strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' ') + ' || '
except:
pass
if 'battery' in heads[j].lower():
match = re.search(r'\d+\s*mAh', dets[j])
try:
st = str(match.group())
except:
st = 'Not Available'
battery_list.append(st.strip().strip('\n').replace('<br>', '').replace('\n', ' ').replace(';', ' '))
if dd!='':
display_list.append(dd)
if pp!='':
processor_list.append(pp)
if mm!='':
memory_list.append(mm)
if cc!='':
camera_list.append(cc)
except:
print('NO SPECS FOR THIS MODEL AVAILABLE.')
pass
if len(display_list)==i:
display_list.append('Not Available')
if len(processor_list)==i:
processor_list.append('Not Available')
if len(memory_list)==i:
memory_list.append('Not Available')
if len(camera_list)==i:
camera_list.append('Not Available')
if len(battery_list)==i:
battery_list.append('Not Available')
if len(thickness_list)==i:
thickness_list.append('Not Available')
print('LENGTH OF THICKNESS LIST: %d' %len(thickness_list))
print('LENGTH OF PROCESSOR LIST: %d' %len(processor_list))
print('LENGTH OF CAMERA LIST: %d' %len(camera_list))
print('LENGTH OF BATTERY LIST: %d' %len(battery_list))
print('LENGTH OF DISPLAY LIST: %d' %len(display_list))
print('LENGTH OF MEMORY LIST: %d' %len(memory_list))
extras_links = href
############# WRITING TO CSV : DO NOT MAKE ANY CHANGES TO THIS PART EXCEPT WRITING THE FILE NAME. ###################################
for i in range(len(model_list)):
records.append((country, company, model_list[i], usp[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
df = | pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS']) | pandas.DataFrame |
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import annotations
__version__ = '1.1b'
import os
from multiprocessing import Pool
from typing import Any, List, Union
import numpy as np
import pandas as pd
import plotly.express as px
from pymatgen import Composition
from Xerus.db.localdb import LocalDB
from Xerus.engine.gsas2riet import refine_comb, simulate_spectra
from Xerus.engine.gsas2utils import make_gpx
from Xerus.engine.gsas2viz import plot_all_gpx, plot_gpx
from Xerus.engine.gsas2wrap import GSASRefiner
from Xerus.readers.datareader import DataReader
from Xerus.settings.settings import GSAS2_BIN, INSTR_PARAMS
from Xerus.similarity.pattern_removal import (combine_pos,
run_correlation_analysis,
run_correlation_analysis_riet)
from Xerus.similarity.visualization import make_plot_all, make_plot_step
from Xerus.utils.cifutils import make_system
from Xerus.utils.preprocessing import remove_baseline, standarize_intensity
from Xerus.utils.tools import (create_folder, group_data, load_json,
make_offset, normalize_formula)
from Xerus.utils.tools import plotly_add as to_add
from Xerus.utils.tools import save_json
class XRay:
"""
Main handler for detecting phases of experimental XRD patterns
Parameters
----------
name : Sample / Project Name (str)
working_folder : Folder to save analysis results (str)
elements : A list of possible elements in the pattern to be analyze. Ie: For HoB2 elements = ['Ho', 'B'].
exp_data_file : Path to experimental data to be analyzed
data_fmt : Experimental data format. Note that it has to be supported by the DataExtensions/DataReader
maxsys : Limit search to a maximum system size. I.e., if maxsys=3 and len(elements) = 4, it searches only up to ternary systems
max_oxy : Similar to above but for oxides restriction. For example, if max_oxy = 2, and element list is ['Ho', 'B', 'O'],
the combinations to be searched for oxygen-containing systems will be only up to Ho-O and B-O; Ho-B-O will not be searched.
remove_background: bool, default: True
Argument to remove background (baseline) of XRD pattern. Defautls to True.
If set to True, it will remove using the baseline estimation algorithm provided by Peakutils (polynomial)
poly_degree: int, default: 8
Polynomial degree to pass to peakutils.baseline for background estimation.
Only relevant IF remove_background is True
standarize_int: bool, default: True
Whether to standardize the intensity of the XRD pattern by dividing by Imax or not.
use_preprocessed: bool, default: True
If set to True, for GSASII refinements it will use the intensity-standardized data.
(Note: It will only use intensity-standardized data for refinement, NOT background-removed data.)
"""
def __init__(
self,
name: str,
working_folder: str,
elements: List[str],
exp_data_file: str,
data_fmt: str = "auto",
maxsys: Union[Any, float] = None,
max_oxy: int = 2,
remove_background: bool = False,
poly_degree: int = 8,
standarize_int: bool = True,
use_preprocessed: bool = True,
):
if data_fmt == "auto":
data_fmt = exp_data_file.split(".")[-1].lower()
print(f"No datafmt passed. Assuming its {data_fmt}")
self.exp_data, self.tmin, self.tmax, self.step = DataReader().read_data(
exp_data_file, data_fmt
) # Read up the data
self.standarize = standarize_int
self.datafmt = data_fmt
self.rb = remove_background
self.pd = poly_degree
self.exp_data_file = exp_data_file
self.name = name
self.working_folder = working_folder # Set up working folder
create_folder(working_folder) # Create folder if it does not exist.
self.elements = elements # Set up elements
self.instr_params = INSTR_PARAMS # Instr params from your x-ray machines
self.filename = os.path.basename(exp_data_file)
self.cif_all = None
self.cif_info = None
self.cif_notsim = None
self.cif_notran = None
self.simulated_gsas2 = []
self.reflections_gsas2 = []
self._notrun = []
self.cif_info = None
self.chosen_ids = []
self.modified_df = None
self.max_oxy = max_oxy
self._gpx_best = None
self._cif_best = None
self._rwp_best = None
self._results = None
self.optimizer = None
self.optgpx = None
self.optlat = None
self.optrwp = None
self.use_preprocessed = use_preprocessed
# Check up Preprocessing
if standarize_int:
print("Standarizing intensity to [0,1]..")
self.exp_data = standarize_intensity(self.exp_data)
if use_preprocessed:
preprocess_name = f"{self.filename.split('.')[0]}_preprocessed.csv"
self._preprocess_path = os.path.join(self.working_folder, preprocess_name)
export_data = self.exp_data.copy()
export_data.drop(["filename"], axis=1, inplace=True)
export_data.to_csv(self._preprocess_path, index=False, header=False)
self._old_data = self.exp_data_file
self._old_fmt = self.datafmt
print(f"Exported new datafile at {self._preprocess_path}")
if remove_background:
print(f"Removing background using polynomial degree: {poly_degree}")
self.exp_data = remove_baseline(
self.exp_data, poly_degree=poly_degree, plot=True
)
@property
def rwp(self):
if self._rwp_best is not None:
return self._rwp_best
else:
return "Please run analyze() function first"
@rwp.setter
def rwp(self, new_value):
self._rwp_best = new_value
@property
def best_gpx(self):
if self._gpx_best is not None:
return self._gpx_best
else:
return "Please run analyze() function first"
@best_gpx.setter
def best_gpx(self, new_value):
self._gpx_best = new_value
@property
def cifs_best(self):
if self._cif_best is not None:
return self._cif_best
else:
return "Please run analyze() function first"
@cifs_best.setter
def cifs_best(self, new_value):
self._cif_best = new_value
@property
def results(self):
if self._results is not None:
return self._results
else:
return "Please run analyze() first"
@results.setter
def results(self, new_value):
self._results = new_value
if maxsys is None and elements is not None:
self.maxsys = len(elements)
else:
self.maxsys = maxsys
self.corrinfo = None ## :)
def get_cifs(
self,
ignore_provider: List[str] = None,
ignore_comb: List[str] = None,
ignore_ids: List[str] = None,
) -> XRay:
"""
Get cifs from MongoDB and write to working folder.
If the CIFs for a certain combination of elements are not present, they will be downloaded automatically.
Parameters
----------
ignore_provider : Ignores a certain list of providers. In case of one, needs a list with one element, eg ["AFLOW"]
ignore_comb : List of possible combinations to be ignored.
ignore_ids: List of possible unique IDs to be ignored.
Returns
-------
self
"""
cif_meta, cif_notran, cif_notsim = LocalDB().get_cifs_and_write(
element_list=self.elements,
outfolder=self.working_folder,
maxn=self.maxsys,
max_oxy=self.max_oxy,
name = self.name
)
self.cif_info = cif_meta
if ignore_provider is not None:
self.cif_info = self.cif_info[~self.cif_info.provider.isin(ignore_provider)]
self.cif_info.reset_index(drop=True, inplace=True)
if ignore_comb is not None:
correct = []
for comb in ignore_comb:
_correct = "".join(comb.split("-"))
correct.append(make_system(Composition(_correct).formula))
self.cif_info = self.cif_info[~self.cif_info.system_type.isin(correct)]
self.cif_info.reset_index(drop=True, inplace=True)
if ignore_ids is not None:
self.cif_info = self.cif_info[~self.cif_info.id.isin(ignore_ids)]
self.cif_info.reset_index(drop=True, inplace=True)
self.cif_notran = cif_notran
self.cif_notsim = cif_notsim
folder = self.working_folder
name_out_cif = os.path.join(
folder, os.path.normpath(folder).replace(os.sep, "_") + "_all_cifs.csv"
)
name_out_notran = os.path.join(
folder, os.path.normpath(folder).replace(os.sep, "_") + "_not_used_riet.csv"
)
name_out_notsim = os.path.join(
folder, os.path.normpath(folder).replace(os.sep, "_") + "_not_used_sim.csv"
)
self.cif_info.to_csv(name_out_cif, index=False)
self.cif_notran.to_csv(name_out_notran, index=False)
self.cif_notsim.to_csv(name_out_notsim, index=False)
return self
def simulate_all(self, n_jobs: int = -1):
"""
Parallel simulation of XRD patterns using Modin (Ray backend)
Parameters
----------
n_jobs: int, default: -1
How many workers to use when starting the multiprocessing Pool. If -1 is given, all available CPUs are used.
Returns
-------
self
"""
if n_jobs > os.cpu_count():
n_jobs = os.cpu_count()
elif n_jobs == -1:
n_jobs = None
tmin = self.tmin
tmax = self.tmax
step = self.step
ciflist = self.cif_info.copy()
self.cif_all = self.cif_info.copy() # keep a copy of all cifs
working_folder = self.working_folder
instr_params = self.instr_params
print("Simulating {} patterns".format(len(ciflist)))
args = [
[f] + [tmin, tmax, step, working_folder, instr_params]
for f in ciflist.full_path
]
with Pool(processes=n_jobs) as p:
paths = p.starmap(simulate_spectra, args)
p.close()
p.join()
print("Done. Cleaning up GSASII files.")
# Clean up
files = [
file
for file in os.listdir(os.getcwd())
if file.endswith(".gpx") or file.endswith(".lst")
]
for file in files:
# print(f"Cleaning up {file}")
os.remove(file)
ciflist["simulated_files"] = [r[0] for r in paths]
ciflist["simulated_reflects"] = [r[1] for r in paths]
ciflist["sm_ran"] = [r[2] for r in paths]
ciflist = ciflist[ciflist["sm_ran"]]
ciflist.drop(["sm_ran"], axis=1, inplace=True)
ciflist.reset_index(drop=True, inplace=True)
ciflist = pd.DataFrame(
ciflist.to_records(index=False)
) # convert back to a pandas df
self.cif_info = ciflist.copy()
return self
def _read_simulations(self) -> XRay:
"""
Helper function for reading simulations based on the simulated (ran) information
Returns
-------
self
"""
reflections = []
simulations = []
for sim, ref, spg, spgnum, cs, name, provider, mid, fname, cij in zip(
self.cif_info["simulated_files"],
self.cif_info["simulated_reflects"],
self.cif_info["spacegroup"],
self.cif_info["spacegroup_number"],
self.cif_info["crystal_system"],
self.cif_info["name"],
self.cif_info["provider"],
self.cif_info["id"],
self.cif_info["filename"],
self.cif_info["Cij"],
):
if spg != "None":
sim_data = pd.read_csv(sim)
ref_data = pd.read_csv(ref)
sim_data["name"] = name
sim_data["provider"] = provider
sim_data["mid"] = mid
sim_data["filename"] = fname
sim_data["spacegroup"] = spg
sim_data["spacegroup_number"] = spgnum
sim_data["crystal_system"] = cs
sim_data["Cij"] = cij
reflections.append(ref_data)
simulations.append((sim_data, fname))
self.simulated_gsas2 = simulations
self.reflections_gsas2 = reflections
return self
def select_cifs(
self,
cif_info: Union[pd.DataFrame, str] = "auto",
save: bool = True,
normalize: bool = True,
by_systemtype: bool = True,
) -> pd.DataFrame:
"""
Filter CIFs by correlation plus either stoichiometry + spacegroup or by system type + spacegroup.
This is done so we avoid using a lot of patterns of the same structure when searching for phases.
Tries to return the highest correlated pattern with the experimental data.
Parameters
----------
cif_info : pd.DataFrame
A Pandas dataframe containing query data from database
save : bool, default: True
Override cif info. Not used.
normalize : bool, default: True
Attempts to "normalize" compositions for groping (stoich+spacegroup) method.
This case is to try to group off-stoichiometric compositions with same spacegroup that should be the same.
Not so effective.
by_systemtype : bool, default: True
Instead of using stoichiometry, groups by "system_type", i.e. HoB2, HoB4, etc. => Ho-B, and then picks one structure per group.
Notes
------
This method is an ATTEMPT at reducing the number of identical structures that come from different sources.
Although it can filter quite well, it fails for 'non-stoichiometric' cases.
A method for grouping identical crystal structures is still required.
Returns
-------
pd.DataFrame
Returns the filtered dataframe.
"""
# This needs to be rewritten someday.
if type(cif_info) == str:
if cif_info == "auto":
cif_info = self.cif_info
if self.cif_info is None:
raise TypeError("Report file or no patterns have been simulated.")
if not by_systemtype:
if not normalize:
dfs = group_data(cif_info, column_name=["name", "spacegroup_number"])
else:
cif_info["normalized_formula"] = cif_info.name.apply(normalize_formula)
dfs = group_data(
cif_info, column_name=["normalized_formula", "spacegroup_number"]
)
else:
dfs = group_data(cif_info, column_name=["system_type", "spacegroup_number"])
chid = []
for df in dfs:
df.sort_values(by="Cij", ascending=False, inplace=True)
# Get the id corresponding to the highest correlated.
# Here we are hoping that those patterns are all the same (fingers-crossed)
material_id = df.id.iat[0]
chid.append(material_id)
df_ = cif_info[cif_info.id.isin(chid)].copy() # create new df
if save:
self.modified_df = df_.copy()
self.chosen_ids = chid
return df_
else:
return df_
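# Equivalent sketch of the grouping idea documented above (illustrative only; it
# assumes the usual cif_info columns 'system_type', 'spacegroup_number', 'Cij', 'id'):
# best_ids = (
#     cif_info.sort_values("Cij", ascending=False)
#     .groupby(["system_type", "spacegroup_number"])
#     .head(1)["id"]
# )
# filtered = cif_info[cif_info.id.isin(best_ids)]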
def calculate_correlations(
self, select_cifs: bool = True, by_sys: bool = True
) -> str:
"""
Calculate correlations between experimental data and obtained crystal structures
Parameters
----------
select_cifs : If set True, it will select one crystal structure out of a group of similar (same spacegroup/lattice)
by taking the one with the highest correlation with the experimental data. This is to avoid a run only having the
"correct" pattern from many different sources.
Returns
-------
A string with the filename with the correlations with the simulated patterns with the experimental pattern
"""
exp_data = self.exp_data.copy()
df_data = [exp_data.loc[:, "int"]]
filenames = [os.path.basename(self.exp_data.filename.iat[0])]
self.cif_info.reset_index(drop=True, inplace=True)
for path in self.cif_info.loc[:, "simulated_files"]:
df_data.append(pd.read_csv(path).loc[:, "int"])
filenames.append(os.path.basename(path))
intensities = pd.concat(df_data, axis=1) # concat intensities
intensities.dropna(inplace=True) # drop anyna just incase
intensities.columns = filenames # set the columns for filenames
Cij = intensities.corr()
Cij_ = Cij.iloc[1:, 0]
self.cif_info["Cij"] = np.array(Cij_)
# Try to filter out "same" CIFs (otherwise we just get a bunch of high Cij from the same phase)
if select_cifs:
self.cif_info = self.select_cifs(
save=False, normalize=True, by_systemtype=by_sys
)
folder = self.working_folder
name_out = os.path.join(
folder,
os.path.normpath(folder).replace(os.sep, "_") + "_Correlations_run_1.csv",
)
# Export already sorted
self.cif_info.sort_values(by="Cij", ascending=False).to_csv(name_out)
print(
f"""Highest Correlated pattern is {self.cif_info.sort_values(by='Cij', ascending=False).name.iat[0]}, with Cij: {self.cif_info.sort_values(by='Cij', ascending=False).Cij.iat[0]}"""
)
self.corrinfo = name_out
return name_out
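# The core of the correlation step in isolation (a sketch, assuming the experimental
# and simulated intensities share the same two-theta grid):
# mat = pd.concat([exp_int, sim_int_1, sim_int_2], axis=1)
# cij = mat.corr().iloc[1:, 0]  # Pearson correlation of each simulated pattern
#                               # with the experimental one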
def analyze(
self,
n_runs: Any[int, str] = "auto",
grabtop: int = 3,
delta: float = 1.3,
combine_filter: bool = False,
select_cifs: bool = True,
plot_all: bool = False,
ignore_provider: List[str] = ("AFLOW",),
ignore_comb: List[str] = None,
ignore_ids: List[str] = None,
solver: str = "box",
group_method: str = "system_type",
auto_threshold: int = 10,
r_ori: bool = False,
n_jobs: int = -1,
) -> pd.DataFrame:
"""
Search for possible phases in an experimental pattern given the possible elements
Parameters
----------
n_runs : Number of phases to search, including main phase
grabtop : How many of the top-correlated patterns to compare at each run
delta : Width of the peak-removal window around the Bragg positions
combine_filter : When doing combinations, allows the code to search the grabtop+1 position when a pattern has
already shown up in a previous run
select_cifs : Defaults to True. See the calculate_correlations documentation
plot_all : To export all refinement plots (defaults to False)
ignore_provider : A list of providers to ignore. Default: ["AFLOW"], due to the large amount of theoretical crystal structures. Can be manually turned on.
ignore_comb : A list of combinations to ignore. Eg: ["B-O"], would ignore searching for B-O oxide.
ignore_ids: A list of possible unique IDs to ignore. Defaults to None.
solver: Decides which solver to use. Defaults to the box method ("box"). For the residual method use "rietveld"
group_method: Decides how to try to group similar crystal structures. Defaults to "system_type".
For stoichiometry-based grouping use "stoich".
auto_threshold: Threshold for when to stop the box method when n_runs is set to "auto". Defaults to 10 (percent)
r_ori: Allows the `rietveld` search method to also try to account for texture (pref. orientation). Defaults to False
n_jobs: How many processes to use when simulating / refining combinations (box)
Returns
-------
A pandas DataFrame with the search results.
"""
# Setup to max CPU count.
if n_jobs == -1:
n_jobs = os.cpu_count()
if solver not in ["box", "rietveld"]:
raise ValueError
if group_method not in ["system_type", "stoich"]:
raise ValueError
elif group_method == "system_type":
systype = True
else:
systype = False
if self.use_preprocessed:
self.exp_data_file = self._preprocess_path
self.datafmt = "csv"
print(
f"Using preprocessed data {self._preprocess_path}. New datafmt is: {self.datafmt}"
)
if n_runs == "auto":
auto = True
n_runs = 100
else:
auto = False
# Get the cifs, simulate the patterns, run correlation (first phase)
self.get_cifs(
ignore_provider=ignore_provider,
ignore_comb=ignore_comb,
ignore_ids=ignore_ids,
).simulate_all(n_jobs=n_jobs).calculate_correlations(
select_cifs=select_cifs, by_sys=systype
)
# Plot highest correlated first.
self.plot_highest_correlated()
# Remove the patterns and get the information of each run etc:
if solver == "rietveld":
if n_runs == "auto":
raise (
NotImplementedError,
"n_runs = `auto` not available for residual method.",
)
runs, topn, topnfilter = run_correlation_analysis_riet(
experimental_data=self.exp_data_file,
patterns_data=self.cif_info,
number_of_runs=n_runs,
datafmt=self.datafmt,
grabtop=grabtop,
working_folder=self.working_folder,
allow_ori=r_ori,
)
else:
runs, topn, topnfilter = run_correlation_analysis(
experimental_data=self.exp_data_file,
patterns_data=self.cif_info,
delta=delta,
number_of_runs=n_runs,
auto=auto,
datafmt=self.datafmt,
grabtop=grabtop,
working_folder=self.working_folder,
remove_background=self.rb,
poly_degree=self.pd,
auto_threshold=auto_threshold,
)
if auto:
n_runs = len(runs)
topnfilter = [pd.DataFrame(data) for data in topnfilter]
if n_runs == 1:
df_final = topn[0]
df_final["gpx_path"] = df_final.apply(
lambda row: make_gpx(row["name"], row.spacegroup, row.full_path), axis=1
)
df_final.sort_values(by="rwp", inplace=True)
df_final.reset_index(drop=True, inplace=True)
df_final.to_csv(
os.path.join(self.working_folder, "Refinement Results.csv"), index=False
)
df_final.drop(["bragg"], axis=1, inplace=True)
print("Analysis complete")
best_rwp = df_final["rwp"].iat[0]
best_gpx = df_final["gpx_path"].iat[0]
best_cifs = df_final["filename"].iat[0]
print(f"Best result {best_cifs}, Rwp: {best_rwp:.2f}%")
self.rwp = best_rwp
self.gpx_best = best_gpx
self.cifs_best = best_cifs
self.results = df_final
self.plot_result(0, engine="plotly", mode="simul", save=True)
print(
f"Saved final plot result as {os.path.basename(best_gpx).replace('gpx', 'html')}"
)
else:
make_plot_all(runs, topn, self.name, self.working_folder)
make_plot_step(runs, topn, self.name, self.working_folder, solver=solver)
if solver == "box":
make_plot_step(
runs, topnfilter, self.name + "_filter", self.working_folder
)
# In case of using "filter" option (remove structure that has already appeared in n-k run, k > 0
if combine_filter:
trials = combine_pos(top_patterns=topnfilter)
else:
trials = combine_pos(top_patterns=topn)
# If dont use filter, attempt to clean up repeated runs and runs with same structure.
aux_df = pd.DataFrame(trials)
# Get ids from each combination (id is a unique identifier of structure of each database)
aux_df["ids"] = aux_df.comb.apply(
lambda comb: [dic["id"] for dic in comb]
)
# Remove ids where there is more than the same id in the list
aux_clean = aux_df[
aux_df.ids.apply(
lambda lst: all(
[lst.count(element) == 1 for element in lst]
)
)
].copy()
# Clean up now runs that are identical
aux_clean["ids_str"] = aux_clean["ids"].apply(
lambda lst: ",".join(lst)
)
aux_clean.drop_duplicates(subset="ids_str", inplace=True)
aux_clean.reset_index(drop=True, inplace=True)
print(
f"Removed {len(aux_df) - len(aux_clean)} repeated combinations."
)
# Remove it
aux_clean.drop(["ids", "ids_str"], axis=1, inplace=True)
if solver == "box":
# Set up the dataframe to refine the patterns in parallel using modin
if combine_filter:
to_run = pd.DataFrame(trials)
else:
to_run = aux_clean
# Set up arguments for multiprocessing
data = self.exp_data_file
wf = self.working_folder
comb_run = to_run.to_dict(orient="records")
args = [[f] + [data, wf] for f in comb_run]
# Refine all combinations in parallel
with Pool(processes=n_jobs) as p:
result = p.starmap(refine_comb, args)
p.close()
p.join()
# Parse results and report.
to_run["rwp"] = [r["rwp"] for r in result]
to_run["wt"] = [r["wt"] for r in result]
outpath = os.path.join(self.working_folder, "trials_result.json")
to_run.sort_values(by="rwp", inplace=True)
trials = to_run.to_dict(orient="records")
# Save raw results in json
save_json(trials, outpath)
# Load first run from correction analysis and concatenate all
first_trial = load_json(
os.path.join(self.working_folder, "first_run.json")
)
first_df = | pd.DataFrame(first_trial) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Date:
January 2020
Course:
Multivariate Statistics
Documentation on clustering in Python:
http://scikit-learn.org/stable/modules/clustering.html
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
"""
import time
import matplotlib.pyplot as plt
import pandas as pd
import os
from sklearn.cluster import DBSCAN, KMeans, MeanShift, estimate_bandwidth
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
from sklearn import preprocessing
from math import floor
import seaborn as sns
from scipy.cluster import hierarchy
import warnings
def norm_to_zero_one(df):
return (df - df.min()) * 1.0 / (df.max() - df.min())
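# i.e. min-max scaling x' = (x - min) / (max - min), mapping each column to the [0, 1] range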
# Plot the scatter matrix
def ScatterMatrix(X, name, path):
print("\nGenerando scatter matrix...")
sns.set()
variables = list(X)
variables.remove('cluster')
sns_plot = sns.pairplot(X, vars=variables, hue="cluster", palette='Paired',
plot_kws={"s": 25}, diag_kind="hist")
# 'hue' indicates that the 'cluster' column defines the colors
sns_plot.fig.subplots_adjust(wspace=.03, hspace=.03);
plt.savefig(path+"scatmatrix"+name+".png")
plt.clf()
# Plot the heatmap
def Heatmap(X, name, path, dataset, labels):
print("\nGenerando heat-map...")
cluster_centers = X.groupby("cluster").mean()
centers = pd.DataFrame(cluster_centers, columns=list(dataset))
centers_desnormal = centers.copy()
# convert the centers back to the original (pre-normalization) ranges
for var in list(centers):
centers_desnormal[var] = dataset[var].min()+centers[var]*(dataset[var].max()-dataset[var].min())
plt.figure(figsize=(11, 13))
sns.heatmap(centers, cmap="YlGnBu", annot=centers_desnormal, fmt='.3f')
plt.savefig(path+"heatmap"+name+".png")
plt.clf()
# Plot dendrograms (with and without scatter matrix)
def Dendrograms(X, name, path):
print("\nGenerando dendogramas...")
# To draw the dendrogram for the hierarchical clustering, I cannot have too many elements.
# I take a random sample to keep only 1000,
# although ideally one would choose a case study that already gives a size like that
if len(X)>1000:
X = X.sample(1000, random_state=seed)
# Normalize the filtered set
X_filtrado_normal = preprocessing.normalize(X, norm='l2')
linkage_array = hierarchy.ward(X_filtrado_normal)
# Produce the dendrogram using scipy, which actually re-runs the hierarchical clustering
hierarchy.dendrogram(linkage_array, orientation='left')
plt.savefig(path+"dendrogram"+name+".png")
plt.clf()
X_filtrado_normal_DF = pd.DataFrame(X_filtrado_normal,index=X.index,columns=usadas)
sns.clustermap(X_filtrado_normal_DF, method='ward', col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
plt.savefig(path+"dendscat"+name+".png")
plt.clf()
# Plot the KDE plot
def KPlot(X, name, k, usadas, path):
print("\nGenerando kplot...")
n_var = len(usadas)
fig, axes = plt.subplots(k, n_var, sharex='col', figsize=(15,10))
fig.subplots_adjust(wspace=0.2)
colors = sns.color_palette(palette=None, n_colors=k, desat=None)
for i in range(k):
dat_filt = X.loc[X['cluster']==i]
for j in range(n_var):
sns.kdeplot(dat_filt[usadas[j]], shade=True, color=colors[i], ax=axes[i,j])
plt.savefig(path+"kdeplot"+name+".png")
plt.clf()
# Plot the boxplot
def BoxPlot(X, name, k, usadas, path):
print("\nGenerando boxplot...")
n_var = len(usadas)
fig, axes = plt.subplots(k, n_var, sharey=True, figsize=(16, 16))
fig.subplots_adjust(wspace=0.4, hspace=0.4)
colors = sns.color_palette(palette=None, n_colors=k, desat=None)
rango = []
for i in range(n_var):
rango.append([X[usadas[i]].min(), X[usadas[i]].max()])
for i in range(k):
dat_filt = X.loc[X['cluster']==i]
for j in range(n_var):
ax = sns.boxplot(dat_filt[usadas[j]], color=colors[i], ax=axes[i, j])
ax.set_xlim(rango[j][0], rango[j][1])
plt.savefig(path+"boxplot"+name+".png")
plt.clf()
def ejecutarAlgoritmos(algoritmos, X, etiq, usadas, path):
# Create the directory if it does not exist
try:
os.stat(path)
except:
os.mkdir(path)
X_normal = X.apply(norm_to_zero_one)
# Lists to store the values
nombres = []
tiempos = []
numcluster = []
metricaCH = []
metricaSC = []
for name,alg in algoritmos:
print(name,end='')
t = time.time()
cluster_predict = alg.fit_predict(X_normal)
tiempo = time.time() - t
k = len(set(cluster_predict))
print(": clusters: {:3.0f}, ".format(k),end='')
print("{:6.2f} segundos".format(tiempo))
# Compute the values of each metric
metric_CH = metrics.calinski_harabaz_score(X_normal, cluster_predict)
print("\nCalinski-Harabaz Index: {:.3f}, ".format(metric_CH), end='')
# computing the Silhouette score can use a lot of RAM.
# If there is a lot of data, more than 10k rows, a sample can be used, e.g. 20%
if len(X) > 10000:
m_sil = 0.2
else:
m_sil = 1.0
metric_SC = metrics.silhouette_score(X_normal, cluster_predict, metric='euclidean',
sample_size=floor(m_sil*len(X)), random_state=seed)
print("Silhouette Coefficient: {:.5f}".format(metric_SC))
# convert the cluster assignment to a DataFrame
clusters = pd.DataFrame(cluster_predict,index=X.index,columns=['cluster'])
# and add it as a column to X
X_cluster = pd.concat([X_normal, clusters], axis=1)
print("\nTamaño de cada cluster:\n")
size = clusters['cluster'].value_counts()
for num,i in size.iteritems():
print('%s: %5d (%5.2f%%)' % (num,i,100*i/len(clusters)))
nombre = name+str(etiq)
# Plot the scatter matrix
ScatterMatrix(X = X_cluster, name = nombre, path = path)
# Plot the heatmap
Heatmap(X = X_cluster, name = nombre, path = path, dataset=X, labels = cluster_predict)
# Plot the KDE plot
KPlot(X = X_cluster, name = nombre, k = k, usadas = usadas, path = path)
# Plot the boxplot
BoxPlot(X = X_cluster, name = nombre, k = k, usadas = usadas, path = path)
if name=='AggCluster':
# Filter out the elements (outliers) that fall into very small clusters in the hierarchical clustering
min_size = 5
X_filtrado = X_cluster[X_cluster.groupby('cluster').cluster.transform(len) > min_size]
k_filtrado = len(set(X_filtrado['cluster']))
print("De los {:.0f} clusters hay {:.0f} con más de {:.0f} elementos. Del total de {:.0f} elementos, se seleccionan {:.0f}".format(k,k_filtrado,min_size,len(X),len(X_filtrado)))
X_filtrado = X_filtrado.drop('cluster', 1)
Dendrograms(X = X_filtrado, name = nombre, path = path)
# Store the data to build the comparison table
nombres.append(name)
tiempos.append(tiempo)
numcluster.append(len(set(cluster_predict)))
metricaCH.append(metric_CH)
metricaSC.append(metric_SC)
print("\n-------------------------------------------\n")
# Build the comparison table
resultados = pd.concat([pd.DataFrame(nombres, columns=['Name']),
pd.DataFrame(numcluster, columns=['Num Clusters']),
pd.DataFrame(metricaCH, columns=['CH']),
pd.DataFrame(metricaSC, columns=['SC']),
pd.DataFrame(tiempos, columns=['Time'])], axis=1)
print(resultados)
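# Hedged usage sketch (the parameter values and the `subset`/`usadas` variables are
# illustrative; `seed` is assumed to be defined as in the rest of the script):
# algoritmos = [
#     ('KMeans', KMeans(n_clusters=4, random_state=seed)),
#     ('AggCluster', AgglomerativeClustering(n_clusters=4, linkage='ward')),
#     ('DBSCAN', DBSCAN(eps=0.2, min_samples=10)),
# ]
# ejecutarAlgoritmos(algoritmos, X=subset[usadas], etiq='caso1', usadas=usadas, path='./caso1/')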
if __name__ == '__main__':
datos = | pd.read_csv('iris.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
l_2d = [[0, 1, 2], [3, 4, 5]]
arr_t = np.array(l_2d).T
print(arr_t)
print(type(arr_t))
# [[0 3]
# [1 4]
# [2 5]]
# <class 'numpy.ndarray'>
l_2d_t = np.array(l_2d).T.tolist()
print(l_2d_t)
print(type(l_2d_t))
# [[0, 3], [1, 4], [2, 5]]
# <class 'list'>
df_t = pd.DataFrame(l_2d).T
print(df_t)
print(type(df_t))
# 0 1
# 0 0 3
# 1 1 4
# 2 2 5
# <class 'pandas.core.frame.DataFrame'>
l_2d_t = | pd.DataFrame(l_2d) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from Pic.maxent_style import maxent_style, remove_palette
from Pic.maxent_font import tick_font
from itertools import combinations
import seaborn as sns
from scipy.stats import norm, t, f, betaprime, logistic, exponpow, foldnorm, poisson, zipf
from scipy.optimize import curve_fit
from scipy.misc import factorial
import re
def poisson_fit(k, lamb):
return (np.power(lamb, k) / factorial(k)) * np.exp(-lamb)
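# poisson_fit implements the Poisson pmf P(k; lambda) = lambda**k * exp(-lambda) / k!
# Note: scipy.misc.factorial was deprecated and later removed; scipy.special.factorial
# is the current location of this function.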
fit_functions = [norm, t, f, foldnorm, logistic, betaprime, exponpow]
fit_names = ["norm", "t", "f", "foldnorm", "logistic", "betaprime", "exponpow"]
# @remove_palette
@maxent_style
def makeHist(col, df, dpi=600, title=None, path=None, palette=None, fit=False):
"""
"""
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 2, 1)
maxValue = int(df[col].max())
if maxValue > 10:
step = int(maxValue / 10)
bins = range(-step, maxValue, step)
bins.append(maxValue)
else:
bins = range(-2, 10, 2)
re_cols = pd.cut(df[col], bins)
# print('re_cols\n', re_cols)
re = re_cols.value_counts(sort=False)
print('re\n', re)
re_div = re.div(re.sum())
print('re_idv\n', re_div)
re_div.plot.bar(ax=ax)
ax1 = fig.add_subplot(1, 2, 2)
re.plot.bar(ax=ax1, logy=True)
if title:
ax.set_title(title)
ax1.set_title(title + "/log")
else:
ax.set_title(col)
ax1.set_title(col + "/log")
# ax1.set_yscale("log")
fig.canvas.set_window_title(col)
if ".png" not in path:
path += '/{0}'.format(col) + '.png'
fig.savefig(filename=path, dpi=dpi, format='png')
plt.show(block=False)
@maxent_style
@remove_palette
# def anormalyScoreHist(cols,score,df,dpi=600,path=None,bins = [-1,0,70,90,100],name="anormalyScore"):
def anormalyScoreHist(cols, score, df, dpi=600, path=None, name="anormalyScore"):
"""
"""
maxValue = int(df[score].max())
if maxValue > 10:
step = int(maxValue / 10)
bins = range(-step, maxValue, step)
bins.append(maxValue)
else:
bins = range(-2, 10, 2)
# re = pd.cut(df[cols], bins).value_counts(sort=False)
re = pd.cut(df[score], bins=bins)
# print('re\n',re.value_counts())
re_group = df.groupby(re)[cols].agg('sum')
indexs = re_group.index
print('re_group\n', re_group)
fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
re_group.plot.bar(ax=ax, stacked=True, legend=False)
# ax.set_title("total view")
ax.xaxis.label.set_visible(False)
tick_font(ax, font_size="x-small", rotation=90)
# re_group[cols] =re_group[cols].apply(lambda x: 0 if x.any() <=0 or x.any() == None else np.log(x))
patches, labels = ax.get_legend_handles_labels()
ax1 = fig.add_subplot(2, 1, 2)
ax1.axis("off")
lgd = ax1.legend(patches, labels, loc='center', ncol=4, bbox_to_anchor=(0.5, 0.2), \
fontsize='x-small', handlelength=0.5, handletextpad=0.8)
fig.canvas.set_window_title(name)
path_1 = path + '/{0}'.format(name + " total view") + '.png'
# fig.savefig(filename=path,dpi=dpi,format='png',bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(filename=path_1, dpi=dpi, format='png')
for k, v in enumerate(indexs):
df = re_group.loc[v, :]
x_labels = df.index.values
bar_values = df._values
fig1 = plt.figure()
axs = fig1.add_subplot(1, 1, 1)
for x, y in enumerate(x_labels):
axs.bar(x, bar_values[x])
axs.set_xticks(range(len(x_labels)))
axs.set_xticklabels(x_labels)
tick_font(axs, font_size="x-small", rotation=90)
path_2 = path + '/{0}_{1}-{2}.png'.format(name, v.left, v.right)
# axs.set_title(v + " score distribute")
fig1.subplots_adjust(bottom=0.4)
fig1.canvas.set_window_title(v)
fig1.savefig(filename=path_2, dpi=dpi, format='png')
# plt.show(block=True)
plt.show(block=False)
@maxent_style
@remove_palette
def dataCorr(cols, df, dpi=600, title='data correlations', path=None, filter_value=1.0):
corr_cols = combinations(cols, 2)
frames = []
for corr_col in corr_cols:
# print(tabulate(df.loc[(df[corr_col[0]] != 1) | (df[corr_col[1]] != 1),corr_col].corr(),showindex="always",)
# tablefmt='fancy_grid',headers=corr_col)
frames.append(df.loc[(df[corr_col[0]] != filter_value) | (df[corr_col[1]] != filter_value), corr_col].corr())
corr_result = pd.concat(frames)
# print(corr_result.shape ,'\n',corr_result)
# print_table(corr_result)
grouped_result = corr_result.groupby(corr_result.index)
# print(grouped_result)
agg_result = grouped_result.agg('sum')
agg_result[agg_result >= 1] = 1
# print(agg_result)
# print_table(agg_result)
# print_macdown_table(agg_result)
fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
agg_result.plot.bar(ax=ax, legend=False)
# ax.set_title("total view")
ax.xaxis.label.set_visible(False)
tick_font(ax, font_size="x-small", rotation=90)
# re_group[cols] =re_group[cols].apply(lambda x: 0 if x.any() <=0 or x.any() == None else np.log(x))
patches, labels = ax.get_legend_handles_labels()
ax1 = fig.add_subplot(2, 1, 2)
ax1.axis("off")
lgd = ax1.legend(patches, labels, loc='center', ncol=4, bbox_to_anchor=(0.5, 0.2), \
fontsize='x-small', handlelength=0.5, handletextpad=0.8)
fig.canvas.set_window_title("col correlation")
path += '/{0}'.format(title) + '.png'
fig.savefig(filename=path, dpi=dpi, format='png')
# plt.show()
plt.show(block=False)
@maxent_style
def makeFeatureHist(col, col1, df, feature="ipGeo", scope=[6, 8], dpi=600, path=None, palette=None):
"""
"""
df1 = df.loc[(df[col1] > scope[0]) & (df[col1] <= scope[1])]
df1_city = pd.value_counts(df1[feature])
if df1_city.size <= 0:
return
max_feature = df1_city.idxmax()
df = df.loc[df[feature] == max_feature]
df = df[col].dropna()
for fit_name, fit_func in zip(fit_names, fit_functions):
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 1, 1)
sns.distplot(ax=ax, a=df, color="blue", hist_kws={"histtype": "step", "linewidth": 3}, \
fit=fit_func, \
fit_kws={"color": next(palette), "lw": 3, "label": fit_name}, \
# rug=True,\
# rug_kws={"color": next(palette)},\
kde=True, \
kde_kws={"color": next(palette), "lw": 3, "label": "KDE"})
ax.set_title("{0}-{1}-{2}".format(col, fit_name, max_feature))
fig.canvas.set_window_title("{0}-{1}-{2}".format(col, fit_name, max_feature))
path1 = path + "/{0}-{1}.png".format(col, fit_name)
fig.savefig(filename=path1, dpi=dpi, format='png')
# plt.show(block=True)
plt.show(block=False)
@maxent_style
def makeSFeatureHist(col, col1, df, feature="maxentID", scope=[6, 8], dpi=600, path=None, palette=None):
"""
"""
df = df[col].dropna()
for fit_name, fit_func in zip(fit_names, fit_functions):
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 1, 1)
sns.distplot(ax=ax, a=df, color="blue", hist_kws={"histtype": "step", "linewidth": 3}, \
fit=fit_func, \
fit_kws={"color": next(palette), "lw": 3, "label": fit_name}, \
# rug=True,\
# rug_kws={"color": next(palette)},\
kde=True, \
kde_kws={"color": next(palette), "lw": 3, "label": "KDE"})
ax.set_title("{0}-{1}".format(col, fit_name))
fig.canvas.set_window_title("{0}-{1}".format(col, fit_name))
path1 = path + "/{0}-{1}.png".format(col, fit_name)
fig.savefig(filename=path1, dpi=dpi, format='png')
# plt.show(block=True)
plt.show(block=False)
@maxent_style
@remove_palette
def pic_label(df, col, title, file_path, dpi=600):
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 1, 1)
ax = df[col].value_counts(normalize=True).plot(kind='bar', ax=ax)
label_0_num = df.loc[df[col] == 0].shape[0]
label_1_num = df.loc[df[col] == 1].shape[0]
text_print = "label = 0 num: {0}\n\nlabel = 1 num: {1}".format(label_0_num, label_1_num)
ax.text(1.55, 0.5, text_print, ha='left', va='center')
fig.subplots_adjust(left=0.1, right=0.7)
ax.yaxis.set_ticklabels(ax.yaxis.get_ticklabels(), rotation=0, ha='right')
ax.xaxis.set_ticklabels(ax.xaxis.get_ticklabels(), rotation=45, ha='right')
ax.set_ylabel('ratio')
ax.set_xlabel('label type')
ax.set_title(title)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if re.search(u'[\u4e00-\u9fff]', title):
file_path += u"/{0}.png".format(title)
else:
file_path += "/{0}.png".format(title.replace(" ", "_"))
fig.savefig(filename=file_path, dpi=dpi, format='png')
plt.show(block=False)
@maxent_style
def makeFeatureGroupCountHist(gcol, ccol, df, dpi=600, title=None, fname=None, palette=None, normal=True, pic=False):
"""
    First, group df by gcol; then count the unique values of ccol per group and
    optionally plot the resulting distribution (histogram, KDE and normal fit).
    :param gcol: column(s) to group by
    :param ccol: column whose unique values are counted per group
    :param df: input DataFrame
    :param dpi: figure resolution
    :param title: figure title
    :param fname: output directory for the saved figure
    :param palette: color palette iterator
    :param normal: drop rows where ccol is null before grouping
    :param pic: draw and save the distribution plot
    :return: grouped DataFrame with one unique-count row per group
"""
if normal:
df = df.loc[df[ccol].notnull()]
df = df.groupby(gcol).agg({ccol: 'nunique'}).reset_index()
# if threshold:
# threshold_path = path + "/{0}_{1}_threshold_ratio.png".format(gcol, ccol)
# threshold_title = title + "阈值调整影响"
# pic_line_threshold_ratio(thresholds=df[ccol].values, title=threshold_title, path=threshold_path)
if pic:
path1 = fname + "/each_{0}_{1}_distribution.png".format(gcol, ccol)
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 1, 1)
sns.distplot(ax=ax, a=df[ccol], color="blue", hist_kws={"histtype": "step", "linewidth": 3},
fit=norm,
fit_kws={"color": next(palette), "lw": 3, "label": "normal"},
kde=True,
kde_kws={"color": next(palette), "lw": 3, "label": "KDE"})
ax.set_title(title)
fig.canvas.set_window_title(title)
fig.savefig(filename=path1, dpi=dpi, format='png')
plt.show(block=False)
# else:
# makeHist(col=ccol, df=df, path=path1, title=title)
return df
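# Illustrative call of makeFeatureGroupCountHist (the DataFrame and column names below
# are only an example, not required by the function):
#   counts = makeFeatureGroupCountHist(gcol="mobile", ccol="ckid", df=events,
#                                      title="distinct ckid per mobile",
#                                      fname="./out", pic=False)
#   # -> one row per mobile with the number of distinct ckid values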
@maxent_style
def makeFeatureGroupTimeHist(gcol, df, dpi=600, title=None, fname=None, palette=None, normal=True, pic=False):
"""
    First, group df by ('mobile', 'ckid') and compute each ckid's first and last
    timestamp; then, per mobile, compute the mean gap in hours between consecutive ckids.
    :param gcol: grouping column(s); the last entry is used for the x-axis label
    :param df: input DataFrame with 'mobile', 'ckid' and 'timestamp' (ms) columns
    :param dpi: figure resolution
    :param title: figure title
    :param fname: output file name for the saved figure
    :param palette: color palette iterator
    :param normal: keep only rows with a positive delta
    :param pic: draw and save the distribution plot
    :return: DataFrame with one 'delta' value (hours) per mobile
"""
def get_delta(row):
_delta = 0
ckids = list(row['ckid'])
time_min = list(row['timestamp_min'])
time_max = list(row['timestamp_max'])
        for i in range(len(ckids) - 1):
pre_time, next_time = time_max[i], time_min[i + 1]
_delta += next_time - pre_time
_del_mean = _delta / len(ckids)
return _del_mean
df = df.groupby(['mobile', 'ckid']).agg({
"timestamp": ["max", 'min']})
df.columns = ["_".join(x) for x in df.columns.ravel()]
df = df.reset_index()
df = df.sort_values(by="timestamp_max")
df = df.groupby("mobile").apply(lambda x: get_delta(x)).reset_index(name='delta')
df['delta'] = df['delta'] / 1000 / 60 / 60
if normal:
df = df.loc[df['delta'] > 0]
# if threshold:
# if isinstance(gcol, list):
# threshold_path = path + "/{0}_{1}_threshold_ratio.png".format("_".join(gcol), tcol)
# else:
# threshold_path = path + "/{0}_{1}_threshold_ratio.png".format(gcol, tcol)
# threshold_title = title + "阈值调整影响"
# pic_line_threshold_ratio(thresholds=df['delta'].values, title=threshold_title, num=3, path=threshold_path)
if pic:
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 1, 1)
sns.distplot(ax=ax, a=df['delta'], color="blue", hist_kws={"histtype": "step", "linewidth": 3},
fit=norm,
fit_kws={"color": next(palette), "lw": 3, "label": "normal"},
kde=True,
kde_kws={"color": next(palette), "lw": 3, "label": "KDE"})
ax.set_title(title)
fig.canvas.set_window_title(title)
ax.set_xlabel("{0}变化数量/小时".format(gcol[-1]))
fig.savefig(filename=fname, dpi=dpi, format='png')
plt.show(block=False)
# else:
# makeHist(col='delta',df=df, path=path1, title=title)
return df
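# The 'delta' returned above is the mean gap, in hours, between the end of one ckid's
# activity (timestamp_max) and the start of the next ckid for the same mobile;
# timestamps are assumed to be in milliseconds, hence the /1000/60/60 conversion.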
@maxent_style
def makeFeatureCorrTimeHist(cor_cols, df, gcol=None, resample_ratio="W", dpi=600, path=None, palette=None):
"""
    Resample two count series over time and plot their joint distribution with a
    regression fit.
    :param cor_cols: pair of columns whose per-period counts are correlated
    :param df: input DataFrame with a 'timestamp' column in milliseconds
    :param gcol: optional extra grouping column
    :param resample_ratio: pandas resample frequency (e.g. "W" for weekly)
    :param dpi: figure resolution
    :param path: output directory for the saved figure
    :param palette: color palette iterator
    :return: None
"""
df = df.copy()
df["timestamp"] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)
df = df.set_index("timestamp")
groups = []
if gcol:
groups.append(gcol)
if resample_ratio:
        groups.append(pd.Grouper(freq=resample_ratio))
grouper = df.groupby(groups)
df1 = pd.DataFrame()
df1[cor_cols[0]] = grouper[cor_cols[0]].count().values
df1[cor_cols[1]] = grouper[cor_cols[1]].count().values
xlim = (df1[cor_cols[0]].min() * 0.8, df1[cor_cols[0]].max() * 1.1)
ylim = (df1[cor_cols[1]].min() * 0.8, df1[cor_cols[1]].max() * 1.1)
grid = sns.jointplot(x=cor_cols[0], y=cor_cols[1], data=df1, kind="reg", xlim=xlim, ylim=ylim,
color=next(palette), size=12);
    grid.fig.suptitle("{0} vs {1} correlation / weekly".format(cor_cols[0], cor_cols[1]))
path1 = path + "/{0}_{1}_correlation.png".format(cor_cols[0], cor_cols[1])
grid.savefig(filename=path1, dpi=dpi, format='png')
plt.show(block=False)
@maxent_style
def makeCitySwitchTimeHist(df, gcol, time_scope=[20, 60], dpi=600, path=None, palette=None):
"""
    Count, per gcol group, how often the city changes suspiciously fast.
    First, group df by gcol; second, count city switches that happen within
    time_scope[0] minutes inside the same province, or within time_scope[1] minutes
    across provinces; third, return one count per group.
    :param df: input DataFrame with 'province', 'city' and 'timestamp' (ms) columns
    :param gcol: column(s) to group by
    :param time_scope: [same-province, cross-province] thresholds in minutes
    :param dpi: figure resolution
    :param path: output directory for saved figures
    :param palette: color palette iterator
    :return: DataFrame with one 'switch' count per gcol group
"""
df = df.copy()
df = df.sort_values(by="timestamp")
def get_switch_over(row):
switch_quick_num = 0
province = list(row['province'])
city = list(row['city'])
time_stamp = list(row['timestamp'])
        for i in range(len(time_stamp) - 1):
pre_province, next_province = province[i], province[i + 1]
pre_city, next_city = city[i], city[i + 1]
pre_time, next_time = time_stamp[i], time_stamp[i + 1]
time_delta = (next_time - pre_time) / 1000 / 60
if pre_province == next_province:
if pre_city != next_city and time_delta <= time_scope[0]:
switch_quick_num += 1
else:
if pre_city != next_city and time_delta <= time_scope[1]:
switch_quick_num += 1
return switch_quick_num
df_switch = df.groupby(gcol).apply(lambda x: get_switch_over(x)).reset_index(name='switch')
# df_switch = df_switch.loc[df_switch['switch'] > 0]
# df_city = df.groupby(gcol).agg({'city':'count'}).reset_index()
# df1 = pd.merge(df_switch,df_city,on=gcol)
# df1['ratio'] = df1['switch'] / df1['city']
# xlim = (df1['switch'].min() * 0.8, df1['switch'].max() * 1.1)
# ylim = (df1['ratio'].min() * 0.8 , df1['ratio'].max() * 1.1)
# grid = sns.jointplot(x='switch', y='ratio', data=df1, kind="reg", xlim=xlim, ylim=ylim,
# color=next(palette), size=12);
# grid.fig.suptitle("同一主动式ID切换过快与所占比例关系")
# grid.set_axis_labels(xlabel="同一主动式ID切换城市过快的数量", ylabel="同一主动式ID切换城市过快的比例")
# path1 = path + "/{0}_switch_quick_ratio_correlation.png".format(gcol)
# grid.savefig(filename=path1,dpi=dpi, format='png')
# plt.show(block=False)
return df_switch
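# A "quick switch" counted above is a city change within time_scope[0] minutes inside
# the same province, or within time_scope[1] minutes across provinces; df_switch holds
# one such count per gcol group.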
@maxent_style
def makeEventSwitchHist(df, gcol, dpi=600, path=None, palette=None):
"""
    Analyse, per gcol group, how often the ckid changes between consecutive events.
    First, group by gcol; second, count transitions where the ckid differs while the
    event type stays the same ('same') and transitions where both differ ('not_same').
    :param df: input DataFrame with 'type', 'ckid' and 'timestamp' columns
    :param gcol: column to group by
    :param dpi: figure resolution
    :param path: output directory for saved figures
    :param palette: color palette iterator
    :return: (df_same, df_not_same) count DataFrames
"""
df = df.copy()
df = df.sort_values(by="timestamp")
def get_switch_event_ckid_same(row, same=True):
switch_evnet_num = 0
type_ = list(row['type'])
ckid = list(row['ckid'])
time_stamp = list(row['timestamp'])
        for i in range(len(time_stamp) - 1):
pre_type, next_type = type_[i], type_[i + 1]
pre_ckid, next_ckid = ckid[i], ckid[i + 1]
if not same and pre_type != next_type and pre_ckid != next_ckid:
switch_evnet_num += 1
elif same and pre_type == next_type and pre_ckid != next_ckid:
switch_evnet_num += 1
return switch_evnet_num
df_same = df.groupby([gcol, 'type']).agg({"ckid": "nunique"}).reset_index()
df_same = df_same.loc[df_same['ckid'] >= 1]
if not df_same.empty:
df_same = df.merge(df_same.drop(['ckid'], axis=1), on=['mobile', 'type'])
df_same = df_same.sort_values(by="timestamp")
df_same = df_same.groupby(gcol).apply(lambda x: get_switch_event_ckid_same(x)).reset_index(name='same')
# df_same = df_same.loc[df_same['same'] > 0]
df_not_same = df.groupby(gcol).agg({"type": "nunique"}).reset_index()
df_not_same = df_not_same.loc[df_not_same['type'] > 1]
if not df_not_same.empty:
df_not_same = df.merge(df_not_same.drop(['type'], axis=1), on='mobile')
df_not_same = df_not_same.sort_values(by="timestamp")
df_not_same = df_not_same.groupby(gcol).apply(lambda x: get_switch_event_ckid_same(x, False)).reset_index(
name='not_same')
# df_not_same = df_not_same.loc[df_not_same['not_same'] > 0]
return df_same, df_not_same
# df_dev = df.groupby(gcol).agg({'ckid':'count'}).reset_index()
# df_dev = df_dev.loc[df_dev.ckid != 0]
# df1 = df_same.merge(df_not_same, on=gcol).merge(df_dev, on=gcol)
# df1['same_ratio'] = df1['same'] / df1['ckid']
# df1['not_same_ratio'] = df1['not_same'] / df1['ckid']
# xlim1 = (df1['same'].min() * 0.8, df1['same'].max() * 1.1)
# ylim1 = (df1['same_ratio'].min() * 0.8 , df1['same_ratio'].max() * 1.1)
# color_ = next(palette)
# grid1 = sns.jointplot(x='same', y='same_ratio', data=df1, kind="reg", xlim=xlim1, ylim=ylim1,
# color=color_, size=12);
# grid1.fig.suptitle("同一设备前后事件类型相同主动式指纹ID数量\比例\分布")
# grid1.set_axis_labels(xlabel="同一设备前后事件类型相同主动式指纹ID数量", ylabel="同一设备前后事件类型相同主动式指纹ID比例")
# xlim2 = (df1['not_same'].min() * 0.8, df1['not_same'].max() * 1.1)
# ylim2 = (df1['not_same_ratio'].min() * 0.8 , df1['not_same_ratio'].max() * 1.1)
# grid2 = sns.jointplot(x='not_same', y='not_same_ratio', data=df1, kind="reg", xlim=xlim2, ylim=ylim2,
# color=color_, size=12);
# grid2.fig.suptitle("同一设备前后事件类型不同主动式指纹ID数量\比例\分布")
# grid2.set_axis_labels(xlabel="同一设备前后事件类型不同主动式指纹ID数量", ylabel="同一设备前后事件类型不同主动式指纹ID比例")
# path1 = path + "/same_event_ckid_{0}_switch_ratio_num_correlation.png".format(gcol)
# path2 = path + "/not_same_event_ckid_{0}_switch_ratio_num_correlation.png".format(gcol)
# grid1.savefig(filename=path1,dpi=dpi, format='png')
# grid2.savefig(filename=path2,dpi=dpi, format='png')
# plt.show(block=False)
@maxent_style
def hist_with_norm(col, df, steps=20, dpi=600, poison_r=True, title=None, fname=None ):
"""
plot hist with poisson distribution fit curve
:param col:
:param df:
:param dpi:
:param poison_r:
:param title:
:param fname:
:return:
"""
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 2, 1)
maxValue = df[col].max()
minValue = df[col].min()
bins = np.linspace(minValue, maxValue, steps)
re_cols = pd.cut(df[col], bins)
re = re_cols.value_counts(sort=False)
re_div = re.div(re.sum())
br = re_div.plot.bar(ax=ax, color='salmon')
x_plot = np.linspace(minValue, maxValue, 1000)
    bin_middles = [patch.get_x() + patch.get_width() / 2.0 for patch in br.containers[0]]
    bar_heights = [patch.get_height() for patch in br.containers[0]]
    parameters, _ = curve_fit(poisson_fit, bin_middles, bar_heights)
    ax.plot(x_plot, poisson_fit(x_plot, *parameters), "-", color="salmon", lw=2)
ax.set_title(title)
fig.canvas.set_window_title(col)
fig.savefig(filename=fname, dpi=dpi, format='png')
plt.show(block=False)
@maxent_style
def hist_with_zip(col, df, dpi=600, title=None, fname=None, palette=None):
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(1, 2, 1)
maxValue = int(df[col].max())
minValue = int(df[col].min())
    if maxValue > 10:
        step = int(maxValue / 10)
        bins = list(range(minValue, maxValue, step))
        bins.append(maxValue)
    else:
        bins = list(range(minValue, 10, 1))
re_cols = pd.cut(df[col], bins, right=False, include_lowest=True)
re = re_cols.value_counts(sort=False)
re_div = re.div(re.sum())
br = re_div.plot.bar(ax=ax)
ax_log = fig.add_subplot(1, 2, 2)
br_log = re.plot.bar(ax=ax_log, logy=True)
    bin_middles = np.array([patch.get_x() + patch.get_width() / 2.0 for patch in br.containers[0]])
    bar_heights = np.array([patch.get_height() for patch in br.containers[0]])
norm_data = bin_middles[0]
pi = bar_heights[0]
x_plot = np.linspace(bin_middles[0], bin_middles[-1], 1000)
def zip_fit(k, _lamb):
k_zero = k - norm_data
return (k_zero == 0) * pi + \
(1 - pi) * (_lamb ** k_zero / factorial(k_zero) * np.exp(-_lamb))
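    # zip_fit is a zero-inflated Poisson: with weight pi the mass sits on the first bin
    # centre (k == norm_data), otherwise counts follow Poisson(_lamb) shifted by norm_data:
    #     P(k) = pi * 1[k == norm_data] + (1 - pi) * _lamb**(k - norm_data) * exp(-_lamb) / (k - norm_data)!
    # pi and norm_data are read off the first histogram bar; _lamb is fitted below.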
parameters, _ = curve_fit(zip_fit, bin_middles, bar_heights)
fit_p = zip_fit(x_plot, *parameters)
ax.plot(x_plot, fit_p, "r-", lw=2)
ax.set_title(title)
ax_log.set_title(title + "/log")
fig.canvas.set_window_title(col)
fig.savefig(filename=fname, dpi=dpi, format='png')
plt.show(block=False)
@maxent_style
def hist_two_fig(col, df, dpi=600, title=None, fname=None, sparse=True, palette=None):
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(2, 1, 1)
maxValue = int(df[col].max())
minValue = int(df[col].min())
    if sparse:
        bins = list(range(minValue, maxValue, 1))
        bins.append(maxValue)
    else:
        if maxValue > 10:
            step = int(maxValue / 10)
            bins = list(range(minValue, maxValue, step))
            bins.append(maxValue)
        else:
            bins = list(range(minValue, 10, 1))
    re_cols = pd.cut(df[col], bins, right=False, include_lowest=True)
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
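# Shared fixtures for the tests below: four (lat, lon, datetime, id) points reused by
# the MoveDataFrame constructors, plus CSV payloads covering default column names,
# renamed columns and a header-less file.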
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
    assert_frame_equal(new_move_df, expected)
# coding=utf-8
import pandas as pd
import re
import os
import json
from datetime import datetime
class dataset_object:
"""
This class allow you to store the data and the category of these data
"""
def __init__(self, dataset,name):
self.dataset, self.name = dataset, name
class file_object:
"""
This class allow you to store detailed informations concerning the raw files
"""
def __init__(self, name, category, extension, path):
self.name, self.category, self.extension, self.path = name, category, extension, path
def load_data (list_of_file_object):
"""
    Description :
        Load data from a list of file_object instances and store those data in
        dataset_object instances together with the category of the file they were
        loaded from. The results are collected in a dictionary keyed by category.
        Based on the extension of each file:
            - load the data from the file
            - if several files share a category, concatenate their dataframes
            - store the result in the dictionary under that category
    Args:
        list_of_file_object: list of file_object
    Returns:
        output: dict mapping category (str) to dataset_object
"""
dict_ds = {}
for f in list_of_file_object:
name_and_ext = re.split(r'\.',f.name)
if(f.extension == "json"):
if f.category in dict_ds:
ds = pd.read_json(f.path + f.name)
ds_concat = pd.concat([dict_ds[f.category].dataset,ds])
dict_ds[f.category] = dataset_object(ds_concat,name_and_ext[0])
else:
ds = pd.read_json(f.path + f.name)
dict_ds[f.category] = dataset_object(ds,name_and_ext[0])
elif(f.extension == "csv"):
if f.category in dict_ds:
                ds = pd.read_csv(f.path + f.name)
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
import numpy as np
import pandas as pd
import pymysql
import pymysql.cursors
from pandas.io import sql
#from werkzeug.utils import secure_filename
#from sqlalchemy import create_engine
from pulp import *
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # plt.hist would return the same bin edges; np.histogram avoids drawing on the server
        _, pint = np.histogram(x, bins=num_bins)
        _, dint = np.histogram(y, bins=num_bins)
        pint = list(pint)
        dint = list(dint)
arr= np.zeros(shape=(len(pint),len(dint)))
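        # arr[i][j] counts how many (price, quantity) observations fall into price bin i
        # and demand bin j; it is rendered below as the price/demand heatmap table.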
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
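        # Fitted demand model (log-linear in time):
        #   qty ~ intercept + b1*(own price - competitor price) + b2*promo1 + b3*promo2 + b4*log(week)
        # The coefficients are kept as module-level globals so the /maxm route can reuse them.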
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
                function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
        def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
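        # Revenue maximisation: maximise sum_t p_t * demand(p_t), i.e. minimise its negation
        # with SLSQP, subject to total demand not exceeding the starting inventory s_0
        # (constraint_1) and non-negative prices (constraint_2); prices start at Price_Lower
        # and are bounded above by Price_Upper.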
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
        opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
        result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
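# Dashboard KPI definitions used above (per scenario):
# Utilisation Percent = 100 * (average load carried per wagon) / load_upper_bounds,
# Allocation Percent = 100 * (batches allocated to wagons) / (total batches in the scenario).
# e.g. 30 allocated batches out of 40 gives an Allocation Percent of 75.0.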
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
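# The two loops above build the per-wagon bar-chart series: load utilisation is
# sum(Delivery Qty) / 205000 * 100 and width utilisation is sum(Width) / 370 * 100,
# where 205000 and 370 appear to be the assumed wagon load and width capacities.
# A wagon carrying 102500 units of load would therefore plot as 50.0 on the first chart.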
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(0)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance between customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
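# dist() is the haversine great-circle distance:
# a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2), c = 2*atan2(sqrt(a), sqrt(1-a)),
# distance = R*c with R = 6373 km, scaled by the per-km cost. As a sanity check, two
# points one degree of latitude apart return roughly 111*cost (6373 * pi/180 ~ 111.2 km).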
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factory
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
+ [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
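# The model is a capacitated facility-location MILP:
# minimise sum_ij cost_ij*production_ij + sum_i fixed_i*factory_status_i + 5000000*sum_j cap_slack_j
# s.t. sum_i production_ij + cap_slack_j == demand_j (each customer is served or penalised)
# sum_j production_ij <= capacity_i * factory_status_i (shipping only from opened factories)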
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
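# Note: factoryalloc() recovers the solution by slicing model.variables() positionally,
# assuming the cap_slack variables sit in a contiguous block just before the
# factory_status variables. That ordering is an assumption inherited from the original
# code and may be fragile across pulp versions; matching on v.name would be sturdier.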
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
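# Dates are bucketed to the first day of their quarter before the groupby-sum,
# e.g. '2017-05-14' -> '2017-04-01' and '2017-11-02' -> '2017-10-01'.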
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
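# Error metrics used to score every forecasting technique below:
# ME = mean(y_true - y_pred), MAE = mean(|y_true - y_pred|),
# MAPE = mean(|y_true - y_pred| / y_pred) * 100.
# Note this MAPE divides by the predictions rather than the actuals, so it can
# differ from the textbook definition when the two diverge.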
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
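# "Moving Average" is fitted as an ARIMA(0,0,1) model (an MA(1) process) per column;
# ratio_inc is the period-over-period percentage change of the forecast,
# int(((current - previous) / previous) * 100), with 0 recorded for the first period.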
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
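# "20%y-%m-%d" rebuilds a four-digit year from the two-digit %y, which only holds for
# dates in the 2000s; the forecast index is assumed to be datetime-like here.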
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting number of columns in the Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run a regression for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set V
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
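# regression() fits a plain linear trend per column: the only explanatory variable is
# the running time-step index (0, 1, 2, ...), so the forecast is a straight-line
# extrapolation noofterms periods past the training data, scored on the hold-out V.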
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
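# Simple exponential smoothing: s_t = alpha*x_t + (1-alpha)*s_{t-1}, seeded with the
# first observation. With alpha=0.5, a last actual of 120 and a last smoothed value of
# 100, the first out-of-sample forecast is 0.5*120 + 0.5*100 = 110; later steps feed
# earlier values back in, so the horizon converges towards a flat level.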
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum= | pd.concat([dataframeforsum,exp],axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assert_index_equal(bf.columns, other.columns)
self.assert_index_equal(bf.index, other.index)
self.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assert_index_equal(bf.columns, self.frame.columns)
self.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
self.assertTrue(isinstance(right, Series))
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(right, expected)
# GH 9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
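# _check_align rebuilds DataFrame.align by hand: reindex both frames onto the joined
# index/columns for the requested join, fillna with the same method/limit along
# fill_axis, and compare with the pair returned by align itself.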
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
# TODO should reindex check_names?
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# neg indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3, 1, 2, 30], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, -31], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, 5], axis=1)
self.assertRaises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# neg indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[0][1]))
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[1]).all())
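# Reindexing introduces missing entries as NaN, which a boolean array cannot hold,
# so the reindexed frame is upcast to object dtype in the assertions above.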
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
self.assertIn('foo', reindexed)
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
self.assertNotIn('foo', reindexed)
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
self.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
newFrame = self.frame.reindex_axis(cols, axis=1)
| assert_frame_equal(newFrame, self.frame) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 17:57:37 2020
@author: matt
"""
import pandas as pd
import argparse
# reading in kraken2 reports
def reading_kraken2(basepath, metadata, level):
# filenames become the index
kraken_total = pd.concat(
map(lambda file:
pd.read_csv(file,
sep='\t',
names=('rel_ab', file[:-4], 'assigned', 'rank', 'tax_id', 'sci_name'),
usecols=(1,3,4,5),
index_col=('tax_id','sci_name','rank')).T,
basepath.split()))
if 'HV1' in kraken_total.index:
kraken_total.index=kraken_total.index.str.replace('V','V-')
if 'MetaHIT-MH0001' in kraken_total.index:
kraken_total.index=kraken_total.index.str.replace('MetaHIT-M','M')
# total values of abundances (unassigned+root)
total_ab_kraken = kraken_total.loc[:,[0, 1]].sum(axis=1)
# relative abundances
kraken_total = kraken_total.div(total_ab_kraken, axis=0)
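# tax_id 0 (unclassified) and tax_id 1 (root) together account for every read in a
# kraken2 report, so their sum is the per-sample total used to normalise the clade
# counts into relative abundances.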
if level == 'species':
# filter so that only species remain and drop rank column afterwards
kraken_total = kraken_total.loc[:,kraken_total.columns.get_level_values(2).isin(['S'])].droplevel('rank', axis=1)
if level == 'genus':
# filter so that only genera remain and drop rank column afterwards
kraken_total = kraken_total.loc[:,kraken_total.columns.get_level_values(2).isin(['G'])].droplevel('rank', axis=1)
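# Rank codes follow the kraken2 report convention: 'S' marks species-level rows and
# 'G' genus-level rows, so the level argument simply selects one taxonomic rank.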
if metadata:
df_metadata = | pd.read_csv(metadata, index_col=0) | pandas.read_csv |
import argparse
from collections import defaultdict
from nltk import word_tokenize
import os
import pandas as pd
import torch
from transformers import (
MarianTokenizer,
MarianMTModel,
)
def load_data(args):
""" Load data from tsv into pd.DataFrame """
df = | pd.read_csv(args.input_file, delimiter="\t") | pandas.read_csv |
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from typing import Optional, Sequence, Iterable, Union, List
from rdkit.Chem import Mol
class Dataset(ABC):
"""Abstract base class for datasets
Subclasses need to implement their own methods based on this class.
"""
def __init__(self) -> None:
pass
def __len__(self) -> int:
"""Get the number of elements in the dataset."""
raise NotImplementedError
@property
@abstractmethod
def mols(self):
raise NotImplementedError
@mols.setter
@abstractmethod
def mols(self, value: Union[List[str], List[Mol], np.array]):
raise NotImplementedError
@property
@abstractmethod
def X(self):
raise NotImplementedError
@X.setter
@abstractmethod
def X(self, value: np.ndarray):
raise NotImplementedError
@property
@abstractmethod
def y(self):
raise NotImplementedError
@y.setter
@abstractmethod
def y(self, value: np.ndarray):
raise NotImplementedError
@property
@abstractmethod
def ids(self):
raise NotImplementedError
@ids.setter
@abstractmethod
def ids(self, value: np.ndarray):
raise NotImplementedError
@property
@abstractmethod
def n_tasks(self):
raise NotImplementedError
@n_tasks.setter
@abstractmethod
def n_tasks(self, value: int):
raise NotImplementedError
@abstractmethod
def get_shape(self):
"""Get the shape of all the elements of the dataset.
mols, X, y, ids.
"""
raise NotImplementedError
@abstractmethod
def get_mols(self) -> np.ndarray:
"""Get the molecules (e.g. SMILES format) vector for this dataset as a single numpy array."""
raise NotImplementedError
@abstractmethod
def get_X(self) -> np.ndarray:
"""Get the features array for this dataset as a single numpy array."""
raise NotImplementedError
@abstractmethod
def get_y(self) -> np.ndarray:
"""Get the y (tasks) vector for this dataset as a single numpy array."""
raise NotImplementedError
@abstractmethod
def get_ids(self) -> np.ndarray:
"""Get the ids vector for this dataset as a single numpy array."""
raise NotImplementedError
@abstractmethod
def remove_nan(self, axis: int = 0):
raise NotImplementedError
@abstractmethod
def remove_elements(self, indexes: List[int]):
raise NotImplementedError
@abstractmethod
def select_features(self, indexes: List[int]):
raise NotImplementedError
@abstractmethod
def select(self, indexes: List[int], axis: int = 0):
raise NotImplementedError
@abstractmethod
def select_to_split(self, indexes: List[int]):
raise NotImplementedError
class NumpyDataset(Dataset):
"""A Dataset defined by in-memory numpy arrays.
This subclass of 'Dataset' stores arrays mols, X, y, ids in memory as
numpy arrays.
"""
@property
def mols(self):
return self._mols
@mols.setter
def mols(self, value):
self._mols = value
@property
def n_tasks(self):
return self._n_tasks
@n_tasks.setter
def n_tasks(self, value):
self._n_tasks = value
@property
def X(self):
if self._X is not None:
if self._X.size > 0:
if self.features2keep is not None:
if self.features2keep.size == 0:
raise Exception("This dataset has no features")
elif len(self._X.shape) == 2:
return self._X[:, self.features2keep]
else:
return self._X
else:
return self._X
else:
raise Exception("This dataset has no features")
else:
return None
@X.setter
def X(self, value: Union[np.array, list, None]):
if isinstance(value, list):
value = np.array(value)
if value is not None and value.size > 0:
if len(value.shape) == 2:
self.features2keep = np.array([i for i in range(value.shape[1])])
else:
self.features2keep = np.array([i for i in range(len(value))])
self._X = value
else:
self._X = None
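# The X getter applies the features2keep column mask on read (only for 2-D feature
# matrices), while this setter resets features2keep to all columns whenever a new
# feature array is assigned.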
@property
def y(self):
return self._y
@y.setter
def y(self, value):
if value is not None and value.size > 0:
self._y = value
else:
self._y = None
@property
def ids(self):
return self._ids
@ids.setter
def ids(self, value):
if value is not None and value.size > 0:
self._ids = value
else:
self._ids = [i for i in range(self.mols.shape[0])]
@property
def features2keep(self):
return self._features2keep
@features2keep.setter
def features2keep(self, value):
self._features2keep = value
def __len__(self) -> int:
return len(self.mols)
def __init__(self, mols: Union[np.ndarray, List[str], List[Mol]], X: Optional[np.ndarray] = None,
y: Optional[np.ndarray] = None,
ids: Optional[np.ndarray] = None, features2keep: Optional[np.ndarray] = None, n_tasks: int = 1):
"""Initialize a NumpyDataset object.
Parameters
----------
mols: np.ndarray
Input features. A numpy array of shape `(n_samples,)`.
X: np.ndarray, optional (default None)
Features. A numpy array of arrays of shape (n_samples, features size)
y: np.ndarray, optional (default None)
Labels. A numpy array of shape `(n_samples,)`. Note that each label can
have an arbitrary shape.
ids: np.ndarray, optional (default None)
Identifiers. A numpy array of shape (n_samples,)
        features2keep: np.ndarray, optional (default None)
Indexes of the features of X to keep.
n_tasks: int, default 1
Number of learning tasks.
"""
super().__init__()
if not isinstance(mols, np.ndarray):
mols = np.array(mols)
        # np.array (not np.ndarray) converts list-like inputs; np.ndarray would misread them as a shape
        if not isinstance(X, np.ndarray) and X is not None:
            X = np.array(X)
        if not isinstance(y, np.ndarray) and y is not None:
            y = np.array(y)
        if not isinstance(ids, np.ndarray) and ids is not None:
            ids = np.array(ids)
        if not isinstance(features2keep, np.ndarray) and features2keep is not None:
            features2keep = np.array(features2keep)
self.mols = mols
if features2keep is not None:
self.features2keep = features2keep
else:
self.features2keep = None
self.X = X
self.y = y
self.ids = ids
self.n_tasks = n_tasks
def len_mols(self):
return len(self.mols)
def len_X(self):
if self.X is not None:
return self.X.shape
else:
return 'X not defined!'
def len_y(self):
if self.y is not None:
return self.y.shape
else:
return 'y not defined!'
def len_ids(self):
if self.ids is not None:
return self.ids.shape
else:
return 'ids not defined!'
def get_shape(self):
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the mols, X and y arrays.
"""
print('Mols_shape: ', self.len_mols())
print('Features_shape: ', self.len_X())
print('Labels_shape: ', self.len_y())
def get_mols(self) -> Union[List[str], List[Mol], None]:
"""Get the features array for this dataset as a single numpy array."""
if self.mols is not None:
return self.mols
else:
print("Molecules not defined!")
return None
def get_X(self) -> Union[np.ndarray, None]:
"""Get the X vector for this dataset as a single numpy array."""
if self.X is not None:
return self.X
else:
print("X not defined!")
return None
def get_y(self) -> Union[np.ndarray, None]:
"""Get the y vector for this dataset as a single numpy array."""
if self.y is not None:
return self.y
else:
print("y not defined!")
return None
def get_ids(self) -> Union[np.ndarray, None]:
"""Get the ids vector for this dataset as a single numpy array."""
if self.ids is not None:
return self.ids
else:
print("ids not defined!")
return None
def remove_duplicates(self):
unique, index = np.unique(self.X, return_index=True, axis=0)
self.select(index, axis=0)
def remove_elements(self, indexes):
"""Remove elements with specific indexes from the dataset
Very useful when doing feature selection or to remove NAs.
"""
all_indexes = self.ids
indexes_to_keep = list(set(all_indexes) - set(indexes))
self.select(indexes_to_keep)
def select_features(self, indexes):
self.select(indexes, axis=1)
def remove_nan(self, axis=0):
"""Remove only samples with at least one NaN in the features (when axis = 0)
Or remove samples with all features with NaNs and the features with at least one NaN (axis = 1) """
j = 0
indexes = []
if axis == 0:
shape = self.X.shape
X = self.X
for i in X:
if len(shape) == 2:
if np.isnan(np.dot(i, i)):
indexes.append(self.ids[j])
else:
if i is None:
indexes.append(self.ids[j])
j += 1
if len(indexes) > 0:
print('Elements with indexes: ', indexes, ' were removed due to the presence of NAs!')
# print('The elements in question are: ', self.mols[indexes])
self.remove_elements(indexes)
elif axis == 1:
self.X = self.X[~np.isnan(self.X).all(axis=1)]
nans_column_indexes = [nans_indexes[1] for nans_indexes in np.argwhere(np.isnan(self.X))]
column_sets = list(set(nans_column_indexes))
self.X = np.delete(self.X, column_sets, axis=1)
def select_to_split(self, indexes: List[int]):
y = None
X = None
ids = None
mols = [self.mols[i] for i in indexes]
if self.y is not None:
y = self.y[indexes]
if self.X is not None:
if len(self.X.shape) == 2:
X = self.X[indexes, :]
else:
X = self.X[indexes]
if self.ids is not None:
ids = self.ids[indexes]
return NumpyDataset(mols, X, y, ids, self.features2keep)
def select(self, indexes: Sequence[int], axis: int = 0):
"""Creates a new subdataset of self from a selection of indexes.
Parameters
----------
indexes: List[int]
List of indices to select.
axis: int
Axis
Returns
-------
Dataset
A NumpyDataset object containing only the selected indexes.
"""
if axis == 0:
all_indexes = self.ids
indexes_to_delete = sorted(list(set(all_indexes) - set(indexes)))
raw_indexes = []
for index in indexes_to_delete:
for i, mol_index in enumerate(all_indexes):
if index == mol_index:
raw_indexes.append(i)
self.mols = np.delete(self.mols, raw_indexes, axis)
if self.y is not None:
self.y = np.delete(self.y, raw_indexes, axis)
if self.X is not None:
self.X = np.delete(self.X, raw_indexes, axis)
if self.ids is not None:
self.ids = np.delete(self.ids, raw_indexes, axis)
if axis == 1:
indexes_to_delete = list(set(self.features2keep) - set(indexes))
self.features2keep = np.array(list(set(self.features2keep) - set(indexes_to_delete)))
self.features2keep = np.sort(self.features2keep)
def merge(self, datasets: Iterable[Dataset]) -> 'NumpyDataset':
"""Merges provided datasets with the self dataset.
Parameters
----------
datasets: Iterable[Dataset]
List of datasets to merge.
Returns
-------
NumpyDataset
A merged NumpyDataset.
"""
datasets = list(datasets)
X = self.X
y = self.y
ids = self.ids
mols = self.mols
        features_mismatch = False
        for ds in datasets:
            mols = np.append(mols, ds.mols, axis=0)
            y = np.append(y, ds.y, axis=0)
            ids = np.append(ids, ds.ids, axis=0)
            if X is not None:
                if ds.X is not None and len(X[0]) == len(ds.X[0]):
                    X = np.append(X, ds.X, axis=0)
                else:
                    features_mismatch = True
        if features_mismatch:
            print('Features are not the same length/type... '
                  '\nRecalculate features for all inputs! '
                  '\nDropping features in the merged dataset!')
            return NumpyDataset(mols, None, y, ids)
else:
return NumpyDataset(mols, X, y, ids, self.features2keep)
def save_to_csv(self, path):
df = pd.DataFrame()
if self.ids is not None:
df['ids'] = pd.Series(self.ids)
df['mols'] = pd.Series(self.mols)
if self.y is not None:
df['y'] = pd.Series(self.y)
if self.X is not None:
columns_names = ['feat_' + str(i + 1) for i in range(self.X.shape[1])]
df_x = pd.DataFrame(self.X, columns=columns_names)
df = pd.concat([df, df_x], axis=1)
df.to_csv(path, index=False)
# TODO: test load and save
def load_features(self, path, sep=',', header=0):
        df = pd.read_csv(path, sep=sep, header=header)
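# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Minimal example of how the NumpyDataset above is expected to be used; the SMILES
# strings, features and labels are invented purely for illustration.
def _numpy_dataset_usage_example():
    mols = ["CCO", "CCN", "CCC"]  # hypothetical SMILES strings
    X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    y = np.array([0, 1, 0])
    dataset = NumpyDataset(mols=mols, X=X, y=y, ids=np.arange(3))
    dataset.get_shape()  # prints the mols/X/y shapes
    # select_to_split returns a new NumpyDataset restricted to the given positions
    return dataset.select_to_split([0, 2])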
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
from sid.config import INDEX_NAMES
from sid.update_states import _kill_people_over_icu_limit
from sid.update_states import _update_immunity_level
from sid.update_states import _update_info_on_new_tests
from sid.update_states import _update_info_on_new_vaccinations
from sid.update_states import compute_waning_immunity
from sid.update_states import update_derived_state_variables
@pytest.mark.unit
def test_kill_people_over_icu_limit_not_binding():
states = pd.DataFrame({"needs_icu": [False] * 5 + [True] * 5, "cd_dead_true": -1})
params = pd.DataFrame(
{
"category": ["health_system"],
"subcategory": ["icu_limit_relative"],
"name": ["icu_limit_relative"],
"value": [50_000],
}
).set_index(INDEX_NAMES)
result = _kill_people_over_icu_limit(states, params, 0)
assert result["cd_dead_true"].eq(-1).all()
@pytest.mark.unit
@pytest.mark.parametrize("n_dead", range(6))
def test_kill_people_over_icu_limit_binding(n_dead):
states = pd.DataFrame(
{
"needs_icu": [False] * (5 - n_dead) + [True] * (5 + n_dead),
"cd_dead_true": -1,
}
)
params = pd.DataFrame(
{
"category": ["health_system"],
"subcategory": ["icu_limit_relative"],
"name": ["icu_limit_relative"],
"value": [50_000],
}
).set_index(INDEX_NAMES)
result = _kill_people_over_icu_limit(states, params, 0)
expected = [10 - n_dead, n_dead] if n_dead != 0 else [10]
assert (result["cd_dead_true"].value_counts() == expected).all()
@pytest.mark.unit
def test_update_info_on_new_tests():
"""Test that info on tests is correctly update.
The tests assume three people: 1. A generic case, 2. someone who will receive a
test, 3. someone who receives a positive test result, 4. someone who receives a
negative test result.
"""
states = pd.DataFrame(
{
"pending_test_date": pd.to_datetime([None, "2020-01-01", None, None]),
"cd_received_test_result_true": [-1, -1, 0, 0],
"cd_received_test_result_true_draws": [3, 3, 3, 3],
"received_test_result": [False, False, True, True],
"new_known_case": False,
"immunity": [0.0, 0.0, 1.0, 0.0],
"knows_immune": False,
"symptomatic": [False, False, False, False],
"infectious": [False, False, True, False],
"knows_infectious": False,
"cd_knows_infectious_false": -1,
"cd_infectious_false": [-1, -1, 5, -1],
}
)
to_be_processed_tests = pd.Series([False, True, False, False])
result = _update_info_on_new_tests(states, to_be_processed_tests)
expected = pd.DataFrame(
{
"pending_test_date": pd.to_datetime([None, None, None, None]),
"cd_received_test_result_true": [-1, 3, 0, 0],
"cd_received_test_result_true_draws": [3, 3, 3, 3],
"received_test_result": [False, False, False, False],
"new_known_case": [False, False, True, False],
"immunity": [0.0, 0.0, 1.0, 0.0],
"knows_immune": [False, False, True, False],
"symptomatic": [False, False, False, False],
"infectious": [False, False, True, False],
"knows_infectious": [False, False, True, False],
"cd_knows_infectious_false": [-1, -1, 5, -1],
"cd_infectious_false": [-1, -1, 5, -1],
}
)
assert result.equals(expected)
@pytest.mark.unit
def test_update_info_on_new_vaccinations():
states = pd.DataFrame(
{
"newly_vaccinated": [False, False, False, False],
"ever_vaccinated": [False, False, False, True],
"cd_ever_vaccinated": [-9999, -9999, -9999, -10],
}
)
newly_vaccinated = pd.Series([False, False, True, False])
result = _update_info_on_new_vaccinations(states, newly_vaccinated)
expected = pd.DataFrame(
{
"newly_vaccinated": [False, False, True, False],
"ever_vaccinated": [False, False, True, True],
"cd_ever_vaccinated": [-9999, -9999, 0, -10],
}
)
assert result.equals(expected)
@pytest.mark.unit
def test_update_derived_state_variables():
states = pd.DataFrame()
states["a"] = np.arange(5)
derived_state_variables = {"b": "a <= 3"}
calculated = update_derived_state_variables(states, derived_state_variables)["b"]
expected = pd.Series([True, True, True, True, False], name="b")
assert_series_equal(calculated, expected)
@pytest.fixture()
def waning_immunity_fixture():
"""Waning immunity fixture.
We test 6 cases (assuming that time_to_reach_maximum is 7 for infection and 28 for
vaccination):
(-9999): Needs to be set to zero.
(-9998): Needs to be set to zero, because linear function will be negative.
(0): Needs to be zero.
(-6): Is the increasing part for both infection and vaccination.
(-8): This is in the decreasing (increasing) part for infection (vaccination).
(-29): In this part both infection and vaccination should be decreasing.
"""
days_since_event_cd = pd.Series([-9999, -9998, 0, -6, -8, -29])
states = pd.DataFrame(
{
"cd_ever_infected": days_since_event_cd,
"cd_ever_vaccinated": days_since_event_cd,
}
)
    # The next lines are needed because ``compute_waning_immunity`` expects this
    # transformation to be done by ``_update_immunity_level``.
days_since_event = -days_since_event_cd
days_since_event[days_since_event >= 9999] = 0
    # values below were calculated by hand
expected_immunity_infection = pd.Series([0, 0, 0, 0.62344, 0.9899, 0.9878])
expected_immunity_vaccination = pd.Series(
[0, 0, 0, 0.00787172, 0.018658891, 0.7998]
)
expected_immunity = np.maximum(
expected_immunity_infection, expected_immunity_vaccination
)
expected_states = states.assign(immunity=expected_immunity)
return {
"states": states,
"days_since_event": days_since_event,
"expected_immunity_infection": expected_immunity_infection,
"expected_immunity_vaccination": expected_immunity_vaccination,
"expected_states": expected_states,
}
@pytest.mark.unit
def test_update_immunity_level(params, waning_immunity_fixture):
states = waning_immunity_fixture["states"]
expected_states = waning_immunity_fixture["expected_states"]
calculated = _update_immunity_level(states, params)
assert_frame_equal(calculated, expected_states, check_dtype=False)
@pytest.mark.unit
@pytest.mark.parametrize("event", ["infection", "vaccination"])
def test_compute_waning_immunity(params, event, waning_immunity_fixture):
days_since_event = waning_immunity_fixture["days_since_event"]
expected = waning_immunity_fixture[f"expected_immunity_{event}"]
calculated = compute_waning_immunity(params, days_since_event, event)
    assert_series_equal(calculated, expected, check_dtype=False)
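# --- Hedged illustration (invented; NOT sid's actual waning-immunity formula) ---
# A toy piecewise-linear curve with the qualitative shape the fixture above
# describes: zero at the event, rising until ``time_to_reach_maximum`` and then
# decaying slowly. The maximum and the slopes are made up for illustration only.
def _toy_waning_immunity(days_since_event, time_to_reach_maximum=7, maximum=0.99,
                         decay_per_day=0.0005):
    days = np.asarray(days_since_event, dtype=float)
    rising = maximum * days / time_to_reach_maximum
    decaying = maximum - decay_per_day * (days - time_to_reach_maximum)
    return np.clip(np.where(days <= time_to_reach_maximum, rising, decaying), 0.0, 1.0)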
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels converted to the values pandas will return.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
            assert_index_equal(value, level)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/20 15:19
Desc: Nanhua Futures - historical trend of commodity indexes - return index - volatility
http://www.nanhua.net/nhzc/varietytrend.html
Starts at 1000 points, accumulated from returns
Target URL: http://www.nanhua.net/ianalysis/volatility/20/NHCI.json?t=1574932291399
"""
import time
import requests
import pandas as pd
from akshare.futures_derivative.futures_index_price_nh import futures_index_symbol_table_nh
def futures_nh_volatility_index(symbol: str = "NHCI", period: str = '20') -> pd.DataFrame:
"""
    Nanhua Futures - single-variety Nanhua index - volatility - full history
    http://www.nanhua.net/nhzc/varietytrend.html
    :param symbol: obtained via ak.futures_index_symbol_table_nh()
    :type symbol: str
    :param period: volatility window; choice of {'5', '20', '60', '120'}
    :type period: str
    :return: volatility - full history
:rtype: pandas.DataFrame
"""
symbol_df = futures_index_symbol_table_nh()
if symbol in symbol_df["code"].tolist():
t = time.time()
url = f"http://www.nanhua.net/ianalysis/volatility/{period}/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
        temp_df['date'] = pd.to_datetime(temp_df["date"], unit='ms')
        return temp_df
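# --- Hedged usage sketch (illustration only; performs a live HTTP request) ---
# Example call for the Nanhua commodity index (NHCI) with a 20-day volatility
# window, matching the defaults documented above.
if __name__ == "__main__":
    futures_nh_volatility_index_df = futures_nh_volatility_index(symbol="NHCI", period="20")
    print(futures_nh_volatility_index_df)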
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
    >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
        given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
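# --- Hedged usage sketch (illustration only; mirrors the Grouper docstring) ---
# Resamples a datetime column into 60s bins through a key-based Grouper, as in
# the ``df.groupby(Grouper(key='date', freq='60s'))`` example documented above.
def _grouper_usage_example():
    import pandas as pd
    df = pd.DataFrame({
        'date': pd.date_range('2014-01-01', periods=6, freq='30s'),
        'value': [1, 2, 3, 4, 5, 6],
    })
    return df.groupby(pd.Grouper(key='date', freq='60s'))['value'].sum()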
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
            # possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
# we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will not show nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# any other error: fall through and take the slow path below
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
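# Editor's note - a hedged illustration of the lookup order above (not from the
# original source): for how='add' on float64 values, get_func tries
# _algos.group_add_float64 first, then _algos.group_add_object, and finally the
# generic _algos.group_add before NotImplementedError is raised.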
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
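# Editor's note - a hedged usage sketch (not part of the original source):
# >>> values = np.array([1, 2, 3, 4, 5, 6])
# >>> binner = np.array([0, 3, 6])
# >>> generate_bins_generic(values, binner, closed='right')
# array([3, 6])
# i.e. values[0:3] fall into the bin (0, 3] and values[3:6] into (3, 6].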
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
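# Editor's note - a hedged worked example (not part of the original source): with
# bins == [3, 6] and binlabels == [lab1, lab2], the loop above yields
# {lab1: [0, 1, 2], lab2: [3, 4, 5]} - each label owns the positional range up to,
# but excluding, its bin edge.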
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which
# depend on the level that was passed in
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = | algos.factorize(inds, sort=True) | pandas.core.algorithms.factorize |
import os
import pandas as pd
import numpy as np
from mobo.utils import find_pareto_front, calc_hypervolume
from utils import get_result_dir
'''
Export csv files for external visualization.
'''
class DataExport:
def __init__(self, optimizer, X, Y, args):
'''
Initialize data exporter from initial data (X, Y).
'''
self.optimizer = optimizer
self.problem = optimizer.real_problem
self.n_var, self.n_obj = self.problem.n_var, self.problem.n_obj
self.batch_size = self.optimizer.selection.batch_size
self.iter = 0
self.transformation = optimizer.transformation
# saving path related
self.result_dir = get_result_dir(args)
n_samples = X.shape[0]
# compute initial hypervolume
pfront, pidx = find_pareto_front(Y, return_index=True)
pset = X[pidx]
if args.ref_point is None:
args.ref_point = np.max(Y, axis=0)
hv_value = calc_hypervolume(pfront, ref_point=args.ref_point)
# init data frame
column_names = ['iterID']
d1 = {'iterID': np.zeros(n_samples, dtype=int)}
d2 = {'iterID': np.zeros(len(pset), dtype=int)}
# design variables
for i in range(self.n_var):
var_name = f'x{i + 1}'
d1[var_name] = X[:, i]
d2[var_name] = pset[:, i]
column_names.append(var_name)
# performance
for i in range(self.n_obj):
obj_name = f'f{i + 1}'
d1[obj_name] = Y[:, i]
obj_name = f'Pareto_f{i + 1}'
d2[obj_name] = pfront[:, i]
# predicted performance
for i in range(self.n_obj):
obj_pred_name = f'Expected_f{i + 1}'
d1[obj_pred_name] = np.zeros(n_samples)
obj_pred_name = f'Uncertainty_f{i + 1}'
d1[obj_pred_name] = np.zeros(n_samples)
obj_pred_name = f'Acquisition_f{i + 1}'
d1[obj_pred_name] = np.zeros(n_samples)
d1['Hypervolume_indicator'] = np.full(n_samples, hv_value)
self.export_data = pd.DataFrame(data=d1) # export all data
self.export_pareto = pd.DataFrame(data=d2) # export pareto data
column_names.append('ParetoFamily')
self.export_approx_pareto = pd.DataFrame(columns=column_names) # export pareto approximation data
self.has_family = hasattr(self.optimizer.selection, 'has_family') and self.optimizer.selection.has_family
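# Editor's note - a hedged usage sketch; the optimizer and args objects are assumed
# to expose the attributes referenced above (e.g. optimizer.selection.batch_size,
# args.ref_point), and the optimizer call below is hypothetical:
# exporter = DataExport(optimizer, X_init, Y_init, args)
# for _ in range(n_iter):
# X_next, Y_next = optimizer.step() # hypothetical per-iteration proposal
# exporter.update(X_next, Y_next)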
def update(self, X_next, Y_next):
'''
For each algorithm iteration adds data for visualization.
Input:
X_next: proposed sample values in design space
Y_next: proposed sample values in performance space
'''
self.iter += 1
# evaluate prediction of X_next on surrogate model
val = self.optimizer.surrogate_model.evaluate(self.transformation.do(x=X_next), std=True)
Y_next_pred_mean = self.transformation.undo(y=val['F'])
Y_next_pred_std = val['S']
acquisition, _, _ = self.optimizer.acquisition.evaluate(val)
pset = self.optimizer.status['pset']
pfront = self.optimizer.status['pfront']
hv_value = self.optimizer.status['hv']
d1 = {'iterID': np.full(self.batch_size, self.iter, dtype=int)} # export all data
d2 = {'iterID': np.full(pfront.shape[0], self.iter, dtype=int)} # export pareto data
# design variables
for i in range(self.n_var):
var_name = f'x{i + 1}'
d1[var_name] = X_next[:, i]
d2[var_name] = pset[:, i]
# performance and predicted performance
for i in range(self.n_obj):
col_name = f'f{i + 1}'
d1[col_name] = Y_next[:, i]
d2['Pareto_'+col_name] = pfront[:, i]
col_name = f'Expected_f{i + 1}'
d1[col_name] = Y_next_pred_mean[:, i]
col_name = f'Uncertainty_f{i + 1}'
d1[col_name] = Y_next_pred_std[:, i]
col_name = f'Acquisition_f{i + 1}'
d1[col_name] = acquisition[:, i]
d1['Hypervolume_indicator'] = np.full(self.batch_size, hv_value)
if self.has_family:
info = self.optimizer.info
family_lbls, approx_pset, approx_pfront = info['family_lbls'], info['approx_pset'], info['approx_pfront']
approx_front_samples = approx_pfront.shape[0]
d3 = {'iterID': np.full(approx_front_samples, self.iter, dtype=int)} # export pareto approximation data
for i in range(self.n_var):
var_name = f'x{i + 1}'
d3[var_name] = approx_pset[:, i]
for i in range(self.n_obj):
d3[f'Pareto_f{i + 1}'] = approx_pfront[:, i]
d3['ParetoFamily'] = family_lbls
else:
approx_pset = self.optimizer.solver.solution['x']
val = self.optimizer.surrogate_model.evaluate(approx_pset)
approx_pfront = val['F']
approx_pset, approx_pfront = self.transformation.undo(approx_pset, approx_pfront)
# find undominated
approx_pfront, pidx = find_pareto_front(approx_pfront, return_index=True)
approx_pset = approx_pset[pidx]
approx_front_samples = approx_pfront.shape[0]
d3 = {'iterID': np.full(approx_front_samples, self.iter, dtype=int)}
for i in range(self.n_var):
var_name = f'x{i + 1}'
d3[var_name] = approx_pset[:, i]
for i in range(self.n_obj):
d3[f'Pareto_f{i + 1}'] = approx_pfront[:, i]
d3['ParetoFamily'] = np.zeros(approx_front_samples)
df1 = pd.DataFrame(data=d1)
df2 = | pd.DataFrame(data=d2) | pandas.DataFrame |
import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from darts.timeseries import TimeSeries
from darts.utils import timeseries_generation as tg
from darts.metrics import mape
from darts.models import (
NaiveSeasonal,
ExponentialSmoothing,
ARIMA,
Theta,
FourTheta,
FFT,
VARIMA
)
from ..utils.utils import SeasonalityMode, TrendMode, ModelMode
from ..logging import get_logger
from ..datasets import AirPassengersDataset, IceCreamHeaterDataset
logger = get_logger(__name__)
try:
from darts.models import RandomForest, LinearRegressionModel
TORCH_AVAILABLE = True
except ImportError:
logger.warning('Torch not installed - some local forecasting models tests will be skipped')
TORCH_AVAILABLE = False
# (forecasting models, maximum error) tuples
models = [
(ExponentialSmoothing(), 5.6),
(ARIMA(12, 2, 1), 10),
(ARIMA(1, 1, 1), 40),
(Theta(), 11.3),
(Theta(1), 20.2),
(Theta(-1), 9.8),
(FourTheta(1), 20.2),
(FourTheta(-1), 9.8),
(FourTheta(trend_mode=TrendMode.EXPONENTIAL), 5.5),
(FourTheta(model_mode=ModelMode.MULTIPLICATIVE), 11.4),
(FourTheta(season_mode=SeasonalityMode.ADDITIVE), 14.2),
(FFT(trend="poly"), 11.4),
(NaiveSeasonal(), 32.4)
]
if TORCH_AVAILABLE:
models += [(LinearRegressionModel(lags=12), 11.0),
(RandomForest(lags=12, n_estimators=200, max_depth=3), 15.5)]
# forecasting models with exogenous variables support
multivariate_models = [
(VARIMA(1, 0, 0), 55.6),
(VARIMA(1, 1, 1), 57.0),
]
dual_models = [ARIMA()]
try:
from ..models import Prophet
models.append((Prophet(), 13.5))
except ImportError:
logger.warning("Prophet not installed - will be skipping Prophet tests")
try:
from ..models import AutoARIMA
models.append((AutoARIMA(), 12.2))
dual_models.append(AutoARIMA())
PMDARIMA_AVAILABLE = True
except ImportError:
logger.warning("pmdarima not installed - will be skipping AutoARIMA tests")
PMDARIMA_AVAILABLE = False
try:
from ..models import TCNModel
TORCH_AVAILABLE = True
except ImportError:
logger.warning("Torch not installed - will be skipping Torch models tests")
TORCH_AVAILABLE = False
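# Editor's note - a hedged sketch of how the (model, maximum error) pairs above are
# presumably consumed by the accuracy tests (the exact test body is not shown here):
# for model, max_error in models:
# model.fit(ts_pass_train)
# forecast = model.predict(len(ts_pass_val))
# assert mape(ts_pass_val, forecast) < max_error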
class LocalForecastingModelsTestCase(DartsBaseTestClass):
# forecasting horizon used in runnability tests
forecasting_horizon = 5
# dummy timeseries for runnability tests
np.random.seed(1)
ts_gaussian = tg.gaussian_timeseries(length=100, mean=50)
# real timeseries for functionality tests
ts_passengers = AirPassengersDataset().load()
ts_pass_train, ts_pass_val = ts_passengers.split_after( | pd.Timestamp("19570101") | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 06:05:37 2021
@author: <NAME>
"""
import results_gen_methods as rgm
import os
import intrp_tech as it
#######################################################################################################################
# ------------------------- Methods used in implementation of Interpretability Techniques --------------------------- #
#######################################################################################################################
def calculate_majority_vote(df_results, top_features_selected, title=""):
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
# sorting the columns to determine feature rank - returns the indices that would sort an array.
sorted_indices = np.argsort(-(df_results.to_numpy()), axis=0)
array_rank = np.empty_like(sorted_indices)
for i in range(len(array_rank[0])):
array_rank[sorted_indices[:, i], i] = np.arange(len(array_rank[:, i]))
# Summing the number of times a feature has been within the top n most important
truth_table = array_rank < top_features_selected
features_on_top = truth_table.astype(int)
number_occurrencies_on_top = features_on_top.sum(axis=1)
# And now plotting a heatmap showing feature importance
df_heatmap_data = pd.DataFrame(number_occurrencies_on_top, columns=['Rank-' + title])
df_heatmap_data.set_index(df_results.index.values, inplace=True)
labels = np.array(df_heatmap_data.index.values)
labels = labels.reshape((labels.shape[0], 1))
grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
# Hide ticks
ax.set_xticks([])
ax.set_yticks([])
plt.title(title, fontsize=18)
# Remove axis
ax.axis('off')
sns.heatmap(df_heatmap_data, annot=labels, cmap='Greens', fmt='', ax=ax, annot_kws={"fontsize": 8},
cbar_ax=cbar_ax, cbar_kws={"orientation": "horizontal"}, linewidths=.5).set_title(title)
file = f'{title}-rank.png'
plt.savefig(os.path.join(rgm.generating_results('Rank'), file), dpi=300)
plt.show(block=False)
plt.pause(3)
plt.close('all')
# Deleting those features that did not appear on the top (to produce a summarised figure)
df_heatmap_data_mainfeatures = df_heatmap_data.drop(df_heatmap_data[df_heatmap_data.iloc[:, 0] < 1].index)
labels = np.array(df_heatmap_data_mainfeatures.index.values)
labels = labels.reshape((labels.shape[0], 1))
grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
plt.title(title, fontsize=18)
# Hide ticks
ax.set_xticks([])
ax.set_yticks([])
# Remove axis
ax.axis('off')
sns.heatmap(df_heatmap_data_mainfeatures, annot=labels, cmap='Greens', fmt='', ax=ax, annot_kws={"fontsize": 8},
cbar_ax=cbar_ax, cbar_kws={"orientation": "horizontal"}, linewidths=.5).set_title(
title + '- Majority Voting')
file = f'{title}-Majority Voting.png'
plt.savefig(os.path.join(rgm.generating_results('Majority Voting'), file), dpi=300)
plt.show(block=False)
plt.pause(3)
plt.close('all')
return df_heatmap_data
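# Editor's note - a hedged worked example (not part of the original source): with
# df_results holding importances
# shap lime
# f1 0.9 0.1
# f2 0.5 0.8
# f3 0.1 0.3
# and top_features_selected = 1, f1 ranks first for shap and f2 ranks first for
# lime, so the returned rank counts are f1 -> 1, f2 -> 1, f3 -> 0.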
def ensemble_feature_importance(shap_results=None, lime_results=None, pi_results=None, model_name=None,
top_feature_majority_voting=None):
import pandas as pd
import matplotlib.pyplot as plt
df_SHAP_Rank = calculate_majority_vote(shap_results, top_feature_majority_voting, title=f"Shap-{model_name}")
df_LIME_Rank = calculate_majority_vote(lime_results, top_feature_majority_voting, title=f'LIME-{model_name}')
df_PI_Rank = calculate_majority_vote(pi_results, top_feature_majority_voting, title=f'PI_{model_name}')
df_ENSEMBLE_ML_MODEL_Rank = calculate_majority_vote(shap_results + lime_results + pi_results,
top_feature_majority_voting,
title=f'{model_name}')
df_All_Feature_Importance_Rank = | pd.concat([df_SHAP_Rank, df_LIME_Rank, df_PI_Rank], axis=1, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 18:27:54 2020
@author: <NAME>
"""
# imports
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import random
import pandas as pd
from progressbar import ProgressBar
pbar = ProgressBar()
from datetime import timedelta, date
from datetime import datetime
from dateutil.relativedelta import relativedelta
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
def sleep_for(opt1, opt2):
time_for = random.uniform(opt1, opt2)
time_for_int = int(round(time_for))
sleep(abs(time_for_int - time_for))
for i in range(time_for_int, 0, -1):
sleep(1)
def daterange(date1, date2):
for n in range(int((date2 - date1).days) + 30):
yield date1 + timedelta(n)
def list_of_dates(start_date, end_date, num_days):
cur_date = start = datetime.strptime(start_date, '%Y-%m-%d').date()
end = datetime.strptime(end_date, '%Y-%m-%d').date()
dates_list = []
dates_list.append(start_date)
while cur_date < end:
# print(cur_date)
cur_date += relativedelta(days=num_days)
dates_list.append(cur_date)
# if last date is after the end date, remove
if dates_list[-1] > end:
dates_list.pop(-1)
# add the last day
dates_list.append(end)
# list of tuples of each date pairing
tup_list = []
counter = 1
for i in dates_list:
# print(i)
try:
tup_list.append((i,dates_list[counter]))
counter += 1
except: # lazy way to skip last date pairing
pass
return tup_list
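# Editor's note - a hedged worked example (not part of the original source):
# list_of_dates('2020-01-01', '2020-01-10', num_days=4) returns
# [('2020-01-01', date(2020, 1, 5)),
# (date(2020, 1, 5), date(2020, 1, 9)),
# (date(2020, 1, 9), date(2020, 1, 10))]
# (as written, the first boundary keeps its original string form while the later
# boundaries are datetime.date objects).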
def twitter_scraper(browser_path, urls, scroll_down_num, post_element_xpath,
start_date, end_date, days_between):
# setting the chromedriver path and initializing driver
driver = webdriver.Chrome(options=chrome_options)
#driver = webdriver.Chrome(executable_path=browser_path)
driver.set_page_load_timeout(100)
# create master df to append to
master_df = pd.DataFrame()
dates_list = list_of_dates(start_date, end_date, num_days=days_between)
# loop through the list of urls listed in config_and_run.py
for orig_url in pbar(urls):
print(str(orig_url))
for day_tup in dates_list:
print(str(day_tup[0]))
print(str(day_tup[1]))
url = orig_url + '%20until%3A' + str(day_tup[1]) + \
'%20since%3A' + str(day_tup[0]) + '&src=typed_query'
driver.get(url)
print(str(url))
sleep_for(10, 15) # sleep a while to be safe
# scroll x number of times
for i in range(0, scroll_down_num):
# scroll down
driver.find_element_by_xpath('//body').send_keys(Keys.END)
sleep_for(4, 7)
# get a list of each post
post_list = driver.find_elements_by_xpath(post_element_xpath)
post_text = [x.text for x in post_list]
print(post_text)
# create temp dataset of each tweet
temp_df = | pd.DataFrame(post_text, columns={'all_text'}) | pandas.DataFrame |
import pandas as pd
import numpy as np
def apply_map(data, func, inplace=False):
if isinstance(data, pd.DataFrame):
# note: DataFrame.apply/applymap may invoke the function more than once per value,
# so map over each column manually.
result = data if inplace else {}
for c in data.columns:
s = list(map(func, data[c].values))
if inplace:
result.loc[:, c] = s
else:
result[c] = s
if not inplace:
result = pd.DataFrame.from_dict(result)
return result
elif isinstance(data, pd.Series):
if inplace:
for i, e in enumerate(map(func, data.values)):
data.iat[i] = e
return data
else:
result = pd.Series(list(map(func, data.values)))
return result
elif isinstance(data, np.ndarray):
def _apply_map(x):
return apply_map(x, func, inplace=inplace)
if len(data.shape) > 1:
result = np.apply_along_axis(_apply_map, -1, data)
return result
else:
_data = | pd.Series(data) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Tools to collect Twitter data from specific accounts.
Part of the module is based on Twitter Scraper library:
https://github.com/bpb27/twitter_scraping
Author: <NAME> <<EMAIL>>
Part of https://github.com/crazyfrogspb/RedditScore project
Copyright (c) 2018 <NAME>. All rights reserved.
This work is licensed under the terms of the MIT license.
"""
import datetime
import math
import os.path as osp
import random
import warnings
from time import sleep
import pandas as pd
from dateutil import parser
import tweepy
from congress import Congress
try:
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
TimeoutException,
StaleElementReferenceException,
WebDriverException)
except ImportError:
warnings.warn(('selenium library is not found, pulling tweets beyond'
' 3200 limit will be unavailable'))
def _format_day(date):
# convert date to required format
day = '0' + str(date.day) if len(str(date.day)) == 1 else str(date.day)
month = '0' + str(date.month) if len(str(date.month)
) == 1 else str(date.month)
year = str(date.year)
return '-'.join([year, month, day])
def _form_url(since, until, user):
# create url request
p1 = 'https://twitter.com/search?f=tweets&vertical=default&q=from%3A'
p2 = user + '%20since%3A' + since + '%20until%3A' + \
until + 'include%3Aretweets&src=typd'
return p1 + p2
def _increment_day(date, i):
# increment date by i days
return date + datetime.timedelta(days=i)
def _grab_tweet_by_ids(ids, api, delay=6.0):
# grab tweets by ids
full_tweets = []
start = 0
end = 100
limit = len(ids)
i = math.ceil(limit / 100)
for go in range(i):
sleep(delay)
id_batch = ids[start:end]
start += 100
end += 100
tweets = api.statuses_lookup(id_batch, tweet_mode='extended')
full_tweets.extend(tweets)
return full_tweets
def _grab_even_more_tweets(screen_name, dates, browser, delay=1.0):
# grab tweets beyond 3200 limit
startdate, enddate = dates
try:
if browser == 'Safari':
driver = webdriver.Safari()
elif browser == 'Firefox':
driver = webdriver.Firefox()
elif browser == 'Chrome':
driver = webdriver.Chrome()
else:
raise ValueError('{} browser is not supported'.format(browser))
except WebDriverException as e:
raise WebDriverException(('You need to download required driver'
' and add it to PATH')) from e
except AttributeError as e:
raise Exception('Check if the browser is installed') from e
except ValueError as e:
raise ValueError('{} browser is not supported'.format(browser)) from e
days = (enddate - startdate).days + 1
id_selector = '.time a.tweet-timestamp'
tweet_selector = 'li.js-stream-item'
screen_name = screen_name.lower()
ids = []
for day in range(days):
d1 = _format_day(_increment_day(startdate, 0))
d2 = _format_day(_increment_day(startdate, 1))
url = _form_url(d1, d2, screen_name)
driver.get(url)
sleep(delay)
try:
found_tweets = driver.find_elements_by_css_selector(tweet_selector)
increment = 10
while len(found_tweets) >= increment:
driver.execute_script(
'window.scrollTo(0, document.body.scrollHeight);')
sleep(delay)
found_tweets = driver.find_elements_by_css_selector(
tweet_selector)
increment += 10
for tweet in found_tweets:
try:
id = tweet.find_element_by_css_selector(
id_selector).get_attribute('href').split('/')[-1]
ids.append(id)
except StaleElementReferenceException as e:
pass
except NoSuchElementException:
pass
except TimeoutException:
pass
startdate = _increment_day(startdate, 1)
return ids
def _handle_tweepy_error(e, user):
if e.api_code == 34:
warnings.warn("{} doesn't exist".format(user))
else:
warnings.warn('Error encountered for user {}: '.format(
user) + str(e))
return pd.DataFrame()
def generate_api(twitter_creds_list):
auths = []
for creds in twitter_creds_list:
try:
auth = tweepy.OAuthHandler(
creds['consumer_key'], creds['consumer_secret'])
auth.set_access_token(creds['access_key'], creds['access_secret'])
except KeyError as e:
raise Exception(("twitter_creds must contain consumer_key,"
" consumer_secret, access_key, and access_secret keys"))
auths.append(auth)
api = tweepy.API(
auths,
retry_count=3,
retry_delay=5,
retry_errors=set([401, 404, 500, 503]),
monitor_rate_limit=True,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
return api
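# Editor's note - a hedged usage sketch (key values are placeholders):
# creds = [{
# 'consumer_key': '...', 'consumer_secret': '...',
# 'access_key': '...', 'access_secret': '...',
# }]
# api = generate_api(creds)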
def grab_tweets(twitter_creds=None, api=None, screen_name=None, user_id=None, timeout=0.1,
fields=None, get_more=False, browser='Firefox',
start_date=None):
"""
Get all tweets from the account
Parameters
----------
twitter_creds: dict
Dictionary or list with Twitter authentication credentials.
Has to contain consumer_key, consumer_secret, access_key, access_secret
screen_name : str, optional
Twitter handle to grab tweets for
user_id: int, optional
Twitter user_id to grab tweets for
timeout: float, optional
Sleeping time between requests
fields: iter, optional
Extra fields to pull from the tweets
get_more: bool, optional
If True, attempt to use Selenium to get more tweets after reaching
3200 tweets limit
browser: {'Firefox', 'Chrome', 'Safari'}, optional
Browser for Selenium to use. Corresponding browser and its webdriver
have to be installed
start_date: datetime.date, optional
The first date to start pulling extra tweets. If None, use the account creation date
Returns
----------
alltweets: pandas DataFrame
Pandas Dataframe with all collected tweets
"""
if not (bool(screen_name) != bool(user_id)):
raise ValueError('You have to provide either screen_name or user_id')
api = generate_api(list(twitter_creds))
if user_id:
try:
u = api.get_user(int(user_id))
screen_name = u.screen_name
reg_date = u.created_at.date()
sleep(timeout)
except tweepy.TweepError as e:
return _handle_tweepy_error(e, user_id)
except ValueError as e:
raise ValueError('{} is not a valid user_id'.format(user_id)) from e
else:
u = api.get_user(screen_name)
reg_date = u.created_at.date()
sleep(timeout)
if fields is None:
fields = []
if start_date is None or start_date < reg_date:
start_date = reg_date
alltweets = []
print("Now grabbing tweets for {}".format(screen_name))
try:
new_tweets = api.user_timeline(screen_name=screen_name,
user_id=user_id, count=200,
tweet_mode='extended')
except tweepy.TweepError as e:
return _handle_tweepy_error(e, screen_name)
alltweets.extend(new_tweets)
if not alltweets:
return pd.DataFrame()
oldest = alltweets[-1].id - 1
while len(new_tweets) > 0:
new_tweets = api.user_timeline(screen_name=screen_name, count=200,
max_id=oldest, tweet_mode='extended')
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
if new_tweets:
print('{} tweets downloaded'.format(len(alltweets)))
sleep(timeout)
if get_more and len(new_tweets) == 0 and len(alltweets) > 3200:
end_date = alltweets[-1].created_at.date()
print('Date of the last collected tweet: {}'.format(end_date))
if end_date > start_date:
print('Now grabbing tweets beyond 3200 limit')
dates = (start_date, end_date)
ids = _grab_even_more_tweets(screen_name, dates, browser)
tweets = _grab_tweet_by_ids(ids, api)
alltweets.extend(tweets)
full_tweets = []
for tweet in alltweets:
if hasattr(tweet, 'retweeted_status'):
text = tweet.retweeted_status.full_text
else:
text = tweet.full_text
retweet = False
if getattr(tweet, 'retweeted_status', None) is not None:
retweet = True
tweet_fields = [text, tweet.id, tweet.created_at, retweet]
for field in fields:
tweet_fields.append(getattr(tweet, field, None))
full_tweets.append(tweet_fields)
full_tweets = pd.DataFrame(
full_tweets, columns=(['text', 'id', 'created_at', 'retweet'] +
fields))
full_tweets['screen_name'] = screen_name
if user_id:
full_tweets['user_id'] = user_id
full_tweets.drop_duplicates('id', inplace=True)
return full_tweets
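# Editor's note - a hedged usage sketch (handle and credentials are placeholders):
# tweets_df = grab_tweets(twitter_creds=creds, screen_name='some_handle',
# fields=['retweet_count'], get_more=False)
# # -> DataFrame with columns: text, id, created_at, retweet, retweet_count, screen_name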
def collect_congress_tweets(congress_list, congress_tweets_file,
meta_info_file, start_date, twitter_creds,
chambers=None, propublica_api_key=None,
append_frequency=10, browser='Chrome',
fields=None, shuffle=False):
"""Collect tweets from American Congressmen.
Parameters
----------
congress_list : iterable
List with Congress numbers to collect data for.
congress_tweets_file : str
Path to the output file with tweets.
meta_info_file : str
Path to the output file with meta information about the Congress.
start_date : str
The first date to start pulling extra tweets.
twitter_creds : dict or list
Dictionary or list with Twitter authentication credentials.
Has to contain consumer_key, consumer_secret, access_key, access_secret
chambers : iterable, optional
List of Chambers to collect tweets for (the default is Senate and House).
propublica_api_key : str, optional
API key for free Propublica Congress API (the default is None).
https://www.propublica.org/datastore/api/propublica-congress-api
append_frequency : int, optional
Frequency of dumping new tweets to CSV (the default is 10).
browser : str, optional
Browser for Selenium to use. Corresponding browser and its webdriver
have to be installed (the default is 'Chrome').
fields : iter, optional
Extra fields to pull from the tweets (the default is retweet_count and favorite_count).
shuffle: bool, optional
Whether to shuffle twitter handles before collecting.
"""
if chambers is None:
chambers = ['House', 'Senate']
if fields is None:
fields = ['retweet_count', 'favorite_count']
if osp.isfile(meta_info_file):
members = | pd.read_csv(meta_info_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
build_features.py
---------------------
Functions for exploratory data analysis, e.g., feature engineering and
dimensionality reduction.
"""
import os
from pathlib import Path
import click
import logging
import json
import datetime
import unicodedata as ucd
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import StandardScaler, RobustScaler
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class Features:
def __init__(self, session):
self._ROOT = str(Path(os.getcwd()).parents[1])
self._input_data_path = os.path.join(self._ROOT, 'data/processed/')
self._supplemental_path = os.path.join(self._ROOT, 'data/supplemental/')
self._session_number = session
self._filehandle = '_'.join([str(self._session_number), 'dataframe.csv'])
self._input_file = os.path.join(self._input_data_path, self._filehandle)
self._data = | pd.read_csv(self._input_file, encoding='utf-8') | pandas.read_csv |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = | date_range('20130101', periods=4) | pandas.date_range |
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import numpy as np
import pytest
from pandas.testing import assert_frame_equal
from gators.imputers.numerics_imputer import NumericsImputer
from gators.imputers.int_imputer import IntImputer
from gators.imputers.float_imputer import FloatImputer
from gators.imputers.object_imputer import ObjectImputer
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture()
def data():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num():
X_int = pd.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing():
X_int = pd.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.copy(),
'float': X_float.copy(),
'object': X_object.copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
@pytest.fixture()
def data_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
X_int_ks = ks.from_pandas(X_int)
X_float_ks = ks.from_pandas(X_float)
X_object_ks = ks.from_pandas(X_object)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_dict = {
'int': X_int_ks,
'float': X_float_ks,
'object': X_object_ks,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num_ks():
X_int = ks.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing_ks():
X_int = ks.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = ks.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.to_pandas().copy(),
'float': X_float.to_pandas().copy(),
'object': X_object.to_pandas().copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = ks.from_pandas(pd.concat([X_int, X_float, X_object], axis=1))
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
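# Hedged sketch (not part of the original suite, assuming gators-style imputers pass through
# columns of other dtypes, as the full-frame fixtures suggest): chaining the three fitted
# imputers on the pandas full frame should reproduce X_expected. `check_full_frame_pd` is a
# hypothetical helper name, not a gators API; it reuses the module's assert_frame_equal import.
def check_full_frame_pd(objs_dict, X, X_expected):
    X_new = objs_dict['int'].transform(X)
    X_new = objs_dict['float'].transform(X_new)
    X_new = objs_dict['object'].transform(X_new)
    assert_frame_equal(X_new, X_expected)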
def test_int_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_float_pd(data):
objs_dict, X_dict, X_expected_dict = data
    assert_frame_equal(
        objs_dict['float'].transform(X_dict['float']),
        X_expected_dict['float'],
    )
def test_object_pd(data):
objs_dict, X_dict, X_expected_dict = data
    assert_frame_equal(
        objs_dict['object'].transform(X_dict['object']),
        X_expected_dict['object'],
    )
@pytest.mark.koalas
def test_int_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']).to_pandas(),
        X_expected_dict['int'],
    )
@pytest.mark.koalas
def test_float_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['float'].transform(X_dict['float']).to_pandas(),
        X_expected_dict['float'],
    )
@pytest.mark.koalas
def test_object_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['object'].transform(X_dict['object']).to_pandas(),
X_expected_dict['object'],
)
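# The *_np tests below exercise transform_numpy on raw ndarrays; results are wrapped back into
# DataFrames before being compared against the expected pandas frames.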
def test_int_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_float_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_object_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_int_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_float_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
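# The *_num_* tests repeat the checks for the numeric-only fixtures (no object columns).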
def test_num_int_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_num_float_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
    assert_frame_equal(
        objs_dict['float'].transform(X_dict['float']),
        X_expected_dict['float'],
    )
@pytest.mark.koalas
def test_num_int_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
    assert_frame_equal(
        objs_dict['int'].transform(X_dict['int'].to_pandas()),
        X_expected_dict['int'],
    )
@pytest.mark.koalas
def test_num_float_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
    assert_frame_equal(
        objs_dict['float'].transform(X_dict['float'].to_pandas()),
        X_expected_dict['float'],
    )
def test_num_int_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_num_float_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
    X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
    assert_frame_equal(X_new, X_expected_dict['float'])
import pytest
import pandas as pd
import goldenowl.asset.asset as at
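# Tests for goldenowl's Asset wrapper: name/value lookups on a synthetic daily price series,
# nearest-date fallback, and the return type of getReturnsTimeFrame.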
def get_prdata():
    # Synthetic daily price series: day i (counted from 1990-01-01) closes at i + 1.
    date_range = list(pd.date_range(start="1990-01-01", end="2000-01-01", freq='1D'))
    price_dict = {date_range[i]: i + 1 for i in range(len(date_range))}
    data = pd.DataFrame(list(price_dict.items()), columns=['Date', 'Close'])
    data['Open'] = data['High'] = data['Low'] = 0
    return data
def test_Value():
    pr_d = get_prdata()
    inst = at.Asset('Test', pr_d)
    # 1993-01-01 is 1096 days after 1990-01-01 (1992 is a leap year), so the close is 1097.
    val = inst.getValue("1993-01-01")
    name = inst.getName()
    assert name == "Test", "Asset name incorrect"
    assert val == 1097, "Asset value retrieval failed"
    # Dates beyond the end of the series should fall back to the nearest available value.
    val1 = inst.getValue("2000-01-01")
    val2 = inst.getValue("2021-01-01")
    assert val1 == val2, "Nearest Asset value retrieval failed"
def test_Returns():
    pr_d = get_prdata()
    inst = at.Asset('Test', pr_d)
    val = inst.getReturnsTimeFrame('1W')
    # The weekly returns series is expected to come back as a pandas Series.
    assert type(val) == type(pd.Series())