prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
import collections.abc as cabc
from copy import copy
from typing import Union, Optional, Sequence, Any, Mapping, List, Tuple
import numpy as np
import pandas as pd
from anndata import AnnData
from cycler import Cycler
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.api.types import is_categorical_dtype
from matplotlib import pyplot as pl, colors
from matplotlib.cm import get_cmap
from matplotlib import rcParams
from matplotlib import patheffects
from matplotlib.colors import Colormap, Normalize
from functools import partial
from .. import _utils
from .._utils import (
_IGraphLayout,
_FontWeight,
_FontSize,
ColorLike,
VBound,
circles,
check_projection,
check_colornorm,
)
from .._docs import (
doc_adata_color_etc,
doc_edges_arrows,
doc_scatter_embedding,
doc_scatter_spatial,
doc_show_save_ax,
)
from ... import logging as logg
from ..._settings import settings
from ..._utils import sanitize_anndata, _doc_params, Empty, _empty
from ..._compat import Literal
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def embedding(
adata: AnnData,
basis: str,
*,
color: Union[str, Sequence[str], None] = None,
gene_symbols: Optional[str] = None,
use_raw: Optional[bool] = None,
sort_order: bool = True,
edges: bool = False,
edges_width: float = 0.1,
edges_color: Union[str, Sequence[float], Sequence[str]] = 'grey',
neighbors_key: Optional[str] = None,
arrows: bool = False,
arrows_kwds: Optional[Mapping[str, Any]] = None,
groups: Optional[str] = None,
components: Union[str, Sequence[str]] = None,
layer: Optional[str] = None,
projection: Literal['2d', '3d'] = '2d',
scale_factor: Optional[float] = None,
color_map: Union[Colormap, str, None] = None,
cmap: Union[Colormap, str, None] = None,
palette: Union[str, Sequence[str], Cycler, None] = None,
na_color: ColorLike = "lightgray",
na_in_legend: bool = True,
size: Union[float, Sequence[float], None] = None,
frameon: Optional[bool] = None,
legend_fontsize: Union[int, float, _FontSize, None] = None,
legend_fontweight: Union[int, _FontWeight] = 'bold',
legend_loc: str = 'right margin',
legend_fontoutline: Optional[int] = None,
vmax: Union[VBound, Sequence[VBound], None] = None,
vmin: Union[VBound, Sequence[VBound], None] = None,
vcenter: Union[VBound, Sequence[VBound], None] = None,
norm: Union[Normalize, Sequence[Normalize], None] = None,
add_outline: Optional[bool] = False,
outline_width: Tuple[float, float] = (0.3, 0.05),
outline_color: Tuple[str, str] = ('black', 'white'),
ncols: int = 4,
hspace: float = 0.25,
wspace: Optional[float] = None,
title: Union[str, Sequence[str], None] = None,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
ax: Optional[Axes] = None,
return_fig: Optional[bool] = None,
**kwargs,
) -> Union[Figure, Axes, None]:
"""\
Scatter plot for a user-specified embedding basis (e.g. umap, pca, etc.)
Parameters
----------
basis
Name of the `obsm` basis to use.
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
"""
check_projection(projection)
sanitize_anndata(adata)
# Setting up color map for continuous values
if color_map is not None:
if cmap is not None:
raise ValueError("Cannot specify both `color_map` and `cmap`.")
else:
cmap = color_map
cmap = copy(get_cmap(cmap))
cmap.set_bad(na_color)
kwargs["cmap"] = cmap
# Prevents warnings during legend creation
na_color = colors.to_hex(na_color, keep_alpha=True)
if size is not None:
kwargs['s'] = size
if 'edgecolor' not in kwargs:
# by default turn off edge color. Otherwise, for
# very small sizes the edge will not reduce its size
# (https://github.com/theislab/scanpy/issues/293)
kwargs['edgecolor'] = 'none'
if groups:
if isinstance(groups, str):
groups = [groups]
args_3d = dict(projection='3d') if projection == '3d' else {}
# Deal with Raw
if use_raw is None:
# check if adata.raw is set
use_raw = layer is None and adata.raw is not None
if use_raw and layer is not None:
raise ValueError(
"Cannot use both a layer and the raw representation. Was passed:"
f"use_raw={use_raw}, layer={layer}."
)
if wspace is None:
# try to set a wspace that is not too large or too small given the
# current figure size
wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02
if adata.raw is None and use_raw:
raise ValueError(
"`use_raw` is set to True but AnnData object does not have raw. "
"Please check."
)
# turn color into a python list
color = [color] if isinstance(color, str) or color is None else list(color)
if title is not None:
# turn title into a python list if not None
title = [title] if isinstance(title, str) else list(title)
# get the points position and the components list
# (only if components is not None)
data_points, components_list = _get_data_points(
adata, basis, projection, components, scale_factor
)
# Setup layout.
# Most of the code is for the case when multiple plots are required
# 'color' is a list of names to be plotted.
# Eg. ['Gene1', 'louvain', 'Gene2'].
# component_list is a list of components [[0,1], [1,2]]
if (
not isinstance(color, str)
and isinstance(color, cabc.Sequence)
and len(color) > 1
) or len(components_list) > 1:
if ax is not None:
raise ValueError(
"Cannot specify `ax` when plotting multiple panels "
"(each for a given value of 'color')."
)
if len(components_list) == 0:
components_list = [None]
# each plot needs to be its own panel
num_panels = len(color) * len(components_list)
fig, grid = _panel_grid(hspace, wspace, ncols, num_panels)
else:
if len(components_list) == 0:
components_list = [None]
grid = None
if ax is None:
fig = pl.figure()
ax = fig.add_subplot(111, **args_3d)
# turn vmax and vmin into a sequence
if isinstance(vmax, str) or not isinstance(vmax, cabc.Sequence):
vmax = [vmax]
if isinstance(vmin, str) or not isinstance(vmin, cabc.Sequence):
vmin = [vmin]
if isinstance(vcenter, str) or not isinstance(vcenter, cabc.Sequence):
vcenter = [vcenter]
if isinstance(norm, Normalize) or not isinstance(norm, cabc.Sequence):
norm = [norm]
if 's' in kwargs:
size = kwargs.pop('s')
if size is not None:
# check if size is any type of sequence, and if so
# set as ndarray
import pandas.core.series
if (
size is not None
and isinstance(size, (cabc.Sequence, pandas.core.series.Series, np.ndarray))
and len(size) == adata.shape[0]
):
size = np.array(size, dtype=float)
else:
size = 120000 / adata.shape[0]
# make the plots
axs = []
import itertools
idx_components = range(len(components_list))
# use itertools.product to make a plot for each color and for each component
# For example if color=[gene1, gene2] and components=['1,2', '2,3'].
# The plots are: [
# color=gene1, components=[1,2], color=gene1, components=[2,3],
# color=gene2, components = [1, 2], color=gene2, components=[2,3],
# ]
for count, (value_to_plot, component_idx) in enumerate(
itertools.product(color, idx_components)
):
color_source_vector = _get_color_source_vector(
adata,
value_to_plot,
layer=layer,
use_raw=use_raw,
gene_symbols=gene_symbols,
groups=groups,
)
color_vector, categorical = _color_vector(
adata,
value_to_plot,
color_source_vector,
palette=palette,
na_color=na_color,
)
# Order points
order = slice(None)
if sort_order is True and value_to_plot is not None and categorical is False:
# Higher values plotted on top, null values on bottom
order = np.argsort(-color_vector, kind="stable")[::-1]
elif sort_order and categorical:
# Null points go on bottom
order = np.argsort(~pd.isnull(color_source_vector), kind="stable")
# Set orders
if isinstance(size, np.ndarray):
size = np.array(size)[order]
color_source_vector = color_source_vector[order]
color_vector = color_vector[order]
_data_points = data_points[component_idx][order, :]
# if plotting multiple panels, get the ax from the grid spec
# else use the ax value (either user given or created previously)
if grid:
ax = pl.subplot(grid[count], **args_3d)
axs.append(ax)
if not (settings._frameon if frameon is None else frameon):
ax.axis('off')
if title is None:
if value_to_plot is not None:
ax.set_title(value_to_plot)
else:
ax.set_title('')
else:
try:
ax.set_title(title[count])
except IndexError:
logg.warning(
"The title list is shorter than the number of panels. "
"Using 'color' value instead for some plots."
)
ax.set_title(value_to_plot)
if not categorical:
vmin_float, vmax_float, vcenter_float, norm_obj = _get_vboundnorm(
vmin, vmax, vcenter, norm, count, color_vector
)
normalize = check_colornorm(
vmin_float,
vmax_float,
vcenter_float,
norm_obj,
)
else:
normalize = None
# make the scatter plot
if projection == '3d':
cax = ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
_data_points[:, 2],
marker=".",
c=color_vector,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
else:
scatter = (
partial(ax.scatter, s=size, plotnonfinite=True)
if scale_factor is None
else partial(circles, s=size, ax=ax) # size in circles is radius
)
if add_outline:
# the default outline is a black edge followed by a
# thin white edge added around connected clusters.
# To add an outline
# three overlapping scatter plots are drawn:
# First black dots with slightly larger size,
# then, white dots a bit smaller, but still larger
# than the final dots. Then the final dots are drawn
# with some transparency.
bg_width, gap_width = outline_width
point = np.sqrt(size)
gap_size = (point + (point * gap_width) * 2) ** 2
bg_size = (np.sqrt(gap_size) + (point * bg_width) * 2) ** 2
# the default black and white colors can be changed using
# the outline_color parameter
bg_color, gap_color = outline_color
# remove edge from kwargs if present
# because edge needs to be set to None
kwargs['edgecolor'] = 'none'
# remove alpha for outline
alpha = kwargs.pop('alpha') if 'alpha' in kwargs else None
ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
s=bg_size,
marker=".",
c=bg_color,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
s=gap_size,
marker=".",
c=gap_color,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
# if user did not set alpha, set alpha to 0.7
kwargs['alpha'] = 0.7 if alpha is None else alpha
cax = scatter(
_data_points[:, 0],
_data_points[:, 1],
marker=".",
c=color_vector,
rasterized=settings._vector_friendly,
norm=normalize,
**kwargs,
)
# remove y and x ticks
ax.set_yticks([])
ax.set_xticks([])
if projection == '3d':
ax.set_zticks([])
# set default axis_labels
name = _basis2name(basis)
if components is not None:
axis_labels = [name + str(x + 1) for x in components_list[component_idx]]
elif projection == '3d':
axis_labels = [name + str(x + 1) for x in range(3)]
else:
axis_labels = [name + str(x + 1) for x in range(2)]
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
if projection == '3d':
# shift the label closer to the axis
ax.set_zlabel(axis_labels[2], labelpad=-7)
ax.autoscale_view()
if edges:
_utils.plot_edges(ax, adata, basis, edges_width, edges_color, neighbors_key)
if arrows:
_utils.plot_arrows(ax, adata, basis, arrows_kwds)
if value_to_plot is None:
# if only dots were plotted without an associated value
# there is no need to plot a legend or a colorbar
continue
if legend_fontoutline is not None:
path_effect = [
patheffects.withStroke(linewidth=legend_fontoutline, foreground='w')
]
else:
path_effect = None
# Adding legends
if categorical:
_add_categorical_legend(
ax,
color_source_vector,
palette=_get_palette(adata, value_to_plot),
scatter_array=_data_points,
legend_loc=legend_loc,
legend_fontweight=legend_fontweight,
legend_fontsize=legend_fontsize,
legend_fontoutline=path_effect,
na_color=na_color,
na_in_legend=na_in_legend,
multi_panel=bool(grid),
)
else:
# TODO: na_in_legend should have some effect here
pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)
if return_fig is True:
return fig
axs = axs if grid else ax
_utils.savefig_or_show(basis, show=show, save=save)
if show is False:
return axs
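# --- Illustrative usage sketch (added for clarity; not part of the original source). ---
# A minimal example of how `embedding` above is typically called. It assumes `adata`
# is an AnnData object with an 'X_umap' entry in `.obsm`, a 'louvain' column in
# `.obs`, and a gene named 'CST3'; all three names are assumptions for illustration.
def _example_embedding_usage(adata):
    # One categorical panel and one gene-expression panel, laid out in two columns.
    return embedding(
        adata,
        basis='umap',
        color=['louvain', 'CST3'],
        ncols=2,
        show=False,
    )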
def _panel_grid(hspace, wspace, ncols, num_panels):
from matplotlib import gridspec
n_panels_x = min(ncols, num_panels)
n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)
# each panel will have the size of rcParams['figure.figsize']
fig = pl.figure(
figsize=(
n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),
n_panels_y * rcParams['figure.figsize'][1],
),
)
left = 0.2 / n_panels_x
bottom = 0.13 / n_panels_y
gs = gridspec.GridSpec(
nrows=n_panels_y,
ncols=n_panels_x,
left=left,
right=1 - (n_panels_x - 1) * left - 0.01 / n_panels_x,
bottom=bottom,
top=1 - (n_panels_y - 1) * bottom - 0.1 / n_panels_y,
hspace=hspace,
wspace=wspace,
)
return fig, gs
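# --- Illustrative sketch (added for clarity; not part of the original source). ---
# Mirrors the grid arithmetic used by `_panel_grid`: the number of columns is capped
# at `ncols`, and the number of rows grows to fit the remaining panels.
def _example_panel_layout(num_panels, ncols=4):
    n_panels_x = min(ncols, num_panels)
    n_panels_y = int(np.ceil(num_panels / n_panels_x))
    return n_panels_x, n_panels_y  # e.g. (4, 2) for num_panels=5, ncols=4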
def _get_vboundnorm(
vmin: Sequence[VBound],
vmax: Sequence[VBound],
vcenter: Sequence[VBound],
norm: Sequence[Normalize],
index: int,
color_vector: Sequence[float],
) -> Tuple[Union[float, None], Union[float, None]]:
"""
Evaluates the value of vmin, vmax and vcenter, each of which can be a
str, in which case it is interpreted as a percentile and should
be specified in the form 'pN' where N is the percentile.
E.g. for a percentile of 85 the format would be 'p85'.
Float percentiles such as 'p99.9' are also accepted.
Alternatively, vmin/vmax can be a function that is applied to
the list of color values (`color_vector`). E.g.
def my_vmax(color_vector): return np.percentile(color_vector, 80)
Parameters
----------
index
The index of the plot
color_vector
List of values for the plot
Returns
-------
(vmin, vmax, vcenter, norm) containing None or float values for
vmin, vmax, vcenter and matplotlib.colors.Normalize or None for norm.
"""
out = []
for v_name, v in [('vmin', vmin), ('vmax', vmax), ('vcenter', vcenter)]:
if len(v) == 1:
# this case usually happens when the user sets e.g. vmax=0.9, which
# is internally converted into a list of length 1, and this
# value is expected to apply to all plots.
v_value = v[0]
else:
try:
v_value = v[index]
except IndexError:
logg.error(
f"The parameter {v_name} is not valid. If setting multiple {v_name} values,"
f"check that the length of the {v_name} list is equal to the number "
"of plots. "
)
v_value = None
if v_value is not None:
if isinstance(v_value, str) and v_value.startswith('p'):
try:
float(v_value[1:])
except ValueError:
logg.error(
f"The parameter {v_name}={v_value} for plot number {index + 1} is not valid. "
f"Please check the correct format for percentiles."
)
# interpret value of vmin/vmax as quantile with the following syntax 'p99.9'
v_value = np.nanpercentile(color_vector, q=float(v_value[1:]))
elif callable(v_value):
# interpret vmin/vmax as function
v_value = v_value(color_vector)
if not isinstance(v_value, float):
logg.error(
f"The return of the function given for {v_name} is not valid. "
"Please check that the function returns a number."
)
v_value = None
else:
try:
float(v_value)
except ValueError:
logg.error(
f"The given {v_name}={v_value} for plot number {index + 1} is not valid. "
f"Please check that the value given is a valid number, a string "
f"starting with 'p' for percentiles or a valid function."
)
v_value = None
out.append(v_value)
out.append(norm[0] if len(norm) == 1 else norm[index])
return tuple(out)
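# --- Illustrative sketch (added for clarity; not part of the original source). ---
# Shows what the percentile-string and callable forms of vmin/vmax resolve to,
# using a toy value array; the numbers are assumptions for illustration only.
def _example_vbound_usage():
    values = np.arange(101, dtype=float)                # 0, 1, ..., 100
    from_percentile = np.nanpercentile(values, q=99)    # what vmax='p99' resolves to (99.0)
    from_callable = (lambda v: float(np.nanpercentile(v, 80)))(values)  # vmax as a function (80.0)
    return from_percentile, from_callable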
def _wraps_plot_scatter(wrapper):
import inspect
params = inspect.signature(embedding).parameters.copy()
wrapper_sig = inspect.signature(wrapper)
wrapper_params = wrapper_sig.parameters.copy()
params.pop("basis")
params.pop("kwargs")
wrapper_params.pop("adata")
params.update(wrapper_params)
annotations = {
k: v.annotation
for k, v in params.items()
if v.annotation != inspect.Parameter.empty
}
if wrapper_sig.return_annotation is not inspect.Signature.empty:
annotations["return"] = wrapper_sig.return_annotation
wrapper.__signature__ = inspect.Signature(
list(params.values()), return_annotation=wrapper_sig.return_annotation
)
wrapper.__annotations__ = annotations
return wrapper
# API
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in UMAP basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
"""
return embedding(adata, 'umap', **kwargs)
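# --- Illustrative sketch (added for clarity; not part of the original source). ---
# Demonstrates the effect of `_wraps_plot_scatter`: the thin `umap` wrapper exposes
# `embedding`'s parameters (minus `basis` and `kwargs`) through introspection.
def _example_wrapped_signature():
    import inspect
    sig = inspect.signature(umap)
    return 'color' in sig.parameters and 'basis' not in sig.parameters  # True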
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
"""
return embedding(adata, 'tsne', **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in Diffusion Map basis.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
"""
return embedding(adata, 'diffmap', **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
edges_arrows=doc_edges_arrows,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def draw_graph(
adata: AnnData, *, layout: Optional[_IGraphLayout] = None, **kwargs
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in graph-drawing basis.
Parameters
----------
{adata_color_etc}
layout
One of the :func:`~scanpy.tl.draw_graph` layouts.
By default, the last computed layout is used.
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
"""
if layout is None:
layout = str(adata.uns['draw_graph']['params']['layout'])
basis = 'draw_graph_' + layout
if 'X_' + basis not in adata.obsm_keys():
raise ValueError(
'Did not find {} in adata.obsm. Did you compute layout {}?'.format(
'draw_graph_' + layout, layout
)
)
return embedding(adata, basis, **kwargs)
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def pca(
adata,
*,
annotate_var_explained: bool = False,
show: Optional[bool] = None,
return_fig: Optional[bool] = None,
save: Union[bool, str, None] = None,
**kwargs,
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in PCA coordinates.
Use the parameter `annotate_var_explained` to annotate the explained variance.
Parameters
----------
{adata_color_etc}
annotate_var_explained
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
"""
if not annotate_var_explained:
return embedding(
adata, 'pca', show=show, return_fig=return_fig, save=save, **kwargs
)
else:
if 'pca' not in adata.obsm.keys() and 'X_pca' not in adata.obsm.keys():
raise KeyError(
f"Could not find entry in `obsm` for 'pca'.\n"
f"Available keys are: {list(adata.obsm.keys())}."
)
label_dict = {
'PC{}'.format(i + 1): 'PC{} ({}%)'.format(i + 1, round(v * 100, 2))
for i, v in enumerate(adata.uns['pca']['variance_ratio'])
}
if return_fig is True:
# edit axis labels in returned figure
fig = embedding(adata, 'pca', return_fig=return_fig, **kwargs)
for ax in fig.axes:
ax.set_xlabel(label_dict[ax.xaxis.get_label().get_text()])
ax.set_ylabel(label_dict[ax.yaxis.get_label().get_text()])
return fig
else:
# get the axs, edit the labels and apply show and save from user
axs = embedding(adata, 'pca', show=False, save=False, **kwargs)
if isinstance(axs, list):
for ax in axs:
ax.set_xlabel(label_dict[ax.xaxis.get_label().get_text()])
ax.set_ylabel(label_dict[ax.yaxis.get_label().get_text()])
else:
axs.set_xlabel(label_dict[axs.xaxis.get_label().get_text()])
axs.set_ylabel(label_dict[axs.yaxis.get_label().get_text()])
_utils.savefig_or_show('pca', show=show, save=save)
if show is False:
return axs
@_wraps_plot_scatter
@_doc_params(
adata_color_etc=doc_adata_color_etc,
scatter_spatial=doc_scatter_spatial,
scatter_bulk=doc_scatter_embedding,
show_save_ax=doc_show_save_ax,
)
def spatial(
adata,
*,
basis: str = "spatial",
img: Union[np.ndarray, None] = None,
img_key: Union[str, None, Empty] = _empty,
library_id: Union[str, Empty] = _empty,
crop_coord: Tuple[int, int, int, int] = None,
alpha_img: float = 1.0,
bw: Optional[bool] = False,
size: float = 1.0,
scale_factor: Optional[float] = None,
spot_size: Optional[float] = None,
na_color: Optional[ColorLike] = None,
show: Optional[bool] = None,
return_fig: Optional[bool] = None,
save: Union[bool, str, None] = None,
**kwargs,
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in spatial coordinates.
This function allows overlaying data on top of images.
Use the parameter `img_key` to see the image in the background,
and the parameter `library_id` to select the image.
By default, `'hires'` and `'lowres'` are attempted.
Use `crop_coord`, `alpha_img`, and `bw` to control how it is displayed.
Use `size` to scale the size of the Visium spots plotted on top.
As this function is designed for imaging data, there are two key assumptions
about how coordinates are handled:
1. The origin (e.g `(0, 0)`) is at the top left – as is common convention
with image data.
2. Coordinates are in the pixel space of the source image, so an equal
aspect ratio is assumed.
If your anndata object has a `"spatial"` entry in `.uns`, the `img_key`
and `library_id` parameters are used to find values for the `img`, `scale_factor`,
and `spot_size` arguments. Alternatively, these values can be passed directly.
Parameters
----------
{adata_color_etc}
{scatter_spatial}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a :class:`~matplotlib.axes.Axes` or a list of them.
Examples
--------
This function behaves very similarly to other embedding plots like
:func:`~scanpy.pl.umap`
>>> adata = sc.datasets.visium_sge("Targeted_Visium_Human_Glioblastoma_Pan_Cancer")
>>> sc.pp.calculate_qc_metrics(adata, inplace=True)
>>> sc.pl.spatial(adata, color="log1p_n_genes_by_counts")
See Also
--------
:func:`scanpy.datasets.visium_sge`
Example visium data.
:tutorial:`spatial/basic-analysis`
Tutorial on spatial analysis.
"""
# get default image params if available
library_id, spatial_data = _check_spatial_data(adata.uns, library_id)
img, img_key = _check_img(spatial_data, img, img_key, bw=bw)
spot_size = _check_spot_size(spatial_data, spot_size)
scale_factor = _check_scale_factor(
spatial_data, img_key=img_key, scale_factor=scale_factor
)
crop_coord = _check_crop_coord(crop_coord, scale_factor)
na_color = _check_na_color(na_color, img=img)
if bw:
cmap_img = "gray"
else:
cmap_img = None
circle_radius = size * scale_factor * spot_size * 0.5
axs = embedding(
adata,
basis=basis,
scale_factor=scale_factor,
size=circle_radius,
na_color=na_color,
show=False,
save=False,
**kwargs,
)
if not isinstance(axs, list):
axs = [axs]
for ax in axs:
cur_coords = np.concatenate([ax.get_xlim(), ax.get_ylim()])
if img is not None:
ax.imshow(img, cmap=cmap_img, alpha=alpha_img)
else:
ax.set_aspect("equal")
ax.invert_yaxis()
if crop_coord is not None:
ax.set_xlim(crop_coord[0], crop_coord[1])
ax.set_ylim(crop_coord[3], crop_coord[2])
else:
ax.set_xlim(cur_coords[0], cur_coords[1])
ax.set_ylim(cur_coords[3], cur_coords[2])
_utils.savefig_or_show('show', show=show, save=save)
if show is False or return_fig is True:
return axs
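# --- Illustrative usage sketch (added for clarity; not part of the original source). ---
# Minimal call of `spatial` reflecting the coordinate conventions documented above:
# the origin is at the top left and `crop_coord` is given in source-image pixel space
# as (left, right, top, bottom). It assumes `adata` was loaded with Visium data under
# `.uns['spatial']` and has the QC column used in the docstring example.
def _example_spatial_usage(adata):
    return spatial(
        adata,
        color='log1p_n_genes_by_counts',
        crop_coord=(1000, 5000, 1000, 5000),
        alpha_img=0.8,
        show=False,
    )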
# Helpers
def _get_data_points(
adata, basis, projection, components, scale_factor
) -> Tuple[List[np.ndarray], List[Tuple[int, int]]]:
"""
Returns the data points corresponding to the selected basis, projection and/or components.
Because multiple components can be given (e.g. components=['1,2', '2,3']), the
returned data are lists, containing one entry per component selection. When only one
component selection is plotted, the list length is 1.
Returns
-------
data_points
Each entry is a numpy array containing the data points
components
The cleaned list of components. Eg. [(0,1)] or [(0,1), (1,2)]
for components = [1,2] and components=['1,2', '2,3'] respectively
"""
if basis in adata.obsm.keys():
basis_key = basis
elif f"X_{basis}" in adata.obsm.keys():
basis_key = f"X_{basis}"
else:
raise KeyError(
f"Could not find entry in `obsm` for '{basis}'.\n"
f"Available keys are: {list(adata.obsm.keys())}."
)
n_dims = 2
if projection == '3d':
# check if the data has a third dimension
if adata.obsm[basis_key].shape[1] == 2:
if settings._low_resolution_warning:
logg.warning(
'Selected projection is "3d" but only two dimensions '
'are available. Only these two dimensions will be plotted'
)
else:
n_dims = 3
if components == 'all':
from itertools import combinations
r_value = 3 if projection == '3d' else 2
_components_list = np.arange(adata.obsm[basis_key].shape[1]) + 1
components = [
",".join(map(str, x)) for x in combinations(_components_list, r=r_value)
]
components_list = []
offset = 0
if basis == 'diffmap':
offset = 1
if components is not None:
# components have different formats, either a list with integers, a string
# or a list of strings.
if isinstance(components, str):
# eg: components='1,2'
components_list.append(
tuple(int(x.strip()) - 1 + offset for x in components.split(','))
)
elif isinstance(components, cabc.Sequence):
if isinstance(components[0], int):
# components=[1,2]
components_list.append(tuple(int(x) - 1 + offset for x in components))
else:
# in this case, the components are str
# eg: components=['1,2'] or components=['1,2', '2,3']
# More than one component can be given and is stored
# as a new item of components_list
for comp in components:
components_list.append(
tuple(int(x.strip()) - 1 + offset for x in comp.split(','))
)
else:
raise ValueError(
"Given components: '{}' are not valid. Please check. "
"A valid example is `components='2,3'`"
)
# check if the components are present in the data
try:
data_points = []
for comp in components_list:
data_points.append(adata.obsm[basis_key][:, comp])
except Exception: # TODO catch the correct exception
raise ValueError(
"Given components: '{}' are not valid. Please check. "
"A valid example is `components='2,3'`"
)
if basis == 'diffmap':
# remove the offset added in the case of diffmap, such that
# plot_scatter can print the labels correctly.
components_list = [
tuple(number - 1 for number in comp) for comp in components_list
]
else:
data_points = [np.array(adata.obsm[basis_key])[:, offset : offset + n_dims]]
components_list = []
if scale_factor is not None: # if the basis needs scaling for the image background
data_points[0] = np.multiply(data_points[0], scale_factor)
return data_points, components_list
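# --- Illustrative sketch (added for clarity; not part of the original source). ---
# `_get_data_points` turns the user-facing, 1-based `components` strings into 0-based
# column tuples: components='2,3' selects obsm columns (1, 2), and a list such as
# ['1,2', '2,3'] yields one tuple per panel.
def _example_components_parsing(components='2,3', offset=0):
    return tuple(int(x.strip()) - 1 + offset for x in components.split(','))  # (1, 2)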
def _add_categorical_legend(
ax,
color_source_vector,
palette: dict,
legend_loc: str,
legend_fontweight,
legend_fontsize,
legend_fontoutline,
multi_panel,
na_color,
na_in_legend: bool,
scatter_array=None,
):
"""Add a legend to the passed Axes."""
if na_in_legend and | pd.isnull(color_source_vector) | pandas.isnull |
#!/usr/bin/env python3
"""
This module prepares a table comparing mass spec MM peptide results from gencode
against the fasta sequences of various orf calling methods
Inputs:
------------------------------------------------------------------------------------------
1. gene isoname file: map transcript name to gene name
2. Gencode peptides file: AllPeptides file from mass spec search using Gencode
3. Pacbio peptides file: Pacbio refined database fasta file
4. Pacbio six frame translation: file listing all possible peptides that can be detected per gene in Pacbio Database
------------------------------------------------------------------------------------------
Output Tables:
------------------------------------------------------------------------------------------
- table comparing pacbio coverage of Gencode peptide results from MM
------------------------------------------------------------------------------------------
"""
#%%
# Import Modules
import pandas as pd
import re
import argparse
import os
from pathlib import Path
from collections import defaultdict
from builtins import any
from Bio import SeqIO
#%%
# Import Files
parser = argparse.ArgumentParser(description='Process peptide related input file locations')
parser.add_argument('--gc_pep', '-gc', action='store', dest='gc_pep_file', help='Gencode AllPeptides file location')
parser.add_argument('--gene_to_isoname', '-gmap', action='store', dest='gene_isoname_file', help = 'Gene names to transcript names file location')
parser.add_argument('--pb_refined_fasta', '-pb', action='store', dest='pb_ref_file', help='Pacbio refined database fasta file location')
parser.add_argument('--pb_6frm', '-sft', action='store', dest='pb_6frm_file', help='Pacbio Six Frame Translation file location')
parser.add_argument('--pb_gene', action='store', dest='pb_gene', help='PB to Gene file')
parser.add_argument("--cpat_all_orfs", action='store', dest='cpat_all_orfs')
parser.add_argument("--cpat_best_orf", action='store', dest='cpat_best_orf')
parser.add_argument("--cpat_orf_protein_fasta", action='store', dest='cpat_protein_fasta')
parser.add_argument('-odir', '--output_directory', action='store', dest='odir', help = 'output directory')
results = parser.parse_args()
gene_isoname_file = results.gene_isoname_file
gc_pep_file = results.gc_pep_file
pb_refined_file = results.pb_ref_file
pb_6frm_file = results.pb_6frm_file
pb_gene_file = results.pb_gene
cpat_best_orf_file = results.cpat_best_orf
cpat_all_orfs_file = results.cpat_all_orfs
cpat_protein_sequence_file = results.cpat_protein_fasta
#%%
# Input Filepaths
# gene_isoname_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/gene_isoname.tsv'
# gc_pep_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/AllPeptides_Gencode.psmtsv'
# pb_refined_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/jurkat_orf_aggregated.fasta'
# pb_6frm_file ='/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/jurkat.6frame.fasta'
# pb_gene_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/pb_gene.tsv'
# cpat_best_orf_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/jurkat.ORF_prob.best.tsv'
# cpat_protein_sequence_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/jurkat.ORF_seqs.fa'
# cpat_all_orfs_file = '/Users/bj8th/Documents/Lab-for-Proteoform-Systems-Biology/LRPG-Visualization/data/jurkat/jurkat.ORF_prob.tsv'
#%%
# loading gencode peptide data, initiate a dataframe
df = pd.read_table(gene_isoname_file, header=None)
isoname_gene = pd.Series(df[0].values, index=df[1]).to_dict()
# import gencode metamorpheus peptide data, filter to 1%FDR
g_cols = ['Base Sequence', 'Protein Accession', 'Decoy/Contaminant/Target', 'QValue']
g_data = pd.read_table(gc_pep_file, usecols = g_cols)
g_data.columns = ['pep_seq', 'acc', 'dct', 'qval']
g_tdata = g_data[(g_data['qval'] <= 0.01) & (g_data['dct']=='T')].reset_index(drop=True)
gc = g_tdata
# pb to gene map
df_pb_gene = pd.read_table(pb_gene_file)
pb_gene = pd.Series(df_pb_gene.gene.values, index=df_pb_gene.pb_acc).to_dict()
# replace each isoname with its gene name, explode distinct genes
def get_gene_name(row):
isonames = re.split('\||\.(?=\D)', row['acc'])
genes = set()
for isoname in isonames:
# TODO - fix the issue of unparsed isonames
if isoname not in isoname_gene: continue # issues with lowercase parsing, Rob working on it 201122
gene = isoname_gene[isoname]
genes.add(gene)
genes = list(genes)
if len(genes) == 0:
return 'no_match'
return genes
gc['genes'] = gc.apply(get_gene_name, axis=1)
# TODO - debug, see TODO above
# print out isonames without a gene match
# found 282 peptides with no matched gene, Rob troubleshooting issue (with parsing of lowercase chars)
#gc[(gc['genes'] == 'no_match')]
# ~5K peptides duplicated in the allpeptides file, due to peptides with diff. mods identified
# gc[gc.duplicated(keep=False)]
gc = gc[['genes', 'pep_seq']]
gc = gc.explode('genes')
gc = gc.drop_duplicates()
gc.columns = ['gene', 'pep_seq']
gc_gene = gc.groupby('gene')['pep_seq'].apply(list).reset_index(name='gc_peps')
gc_gene['peps_in_gc'] = gc_gene['gc_peps'].apply(len)
# ~77K unique peptide-to-gene pairs
# 8018 unique genes
# presence of gc peptides in pb databse (generic function)
def get_pb_pep_coverage_stats(row, pb_dict):
gene, peps, peps_in_gc = row['gene'], row['gc_peps'], row['peps_in_gc']
if gene not in pb_dict:
return 0, 0, 0
else:
num_peps_in_db = 0
for pep in peps:
if pep in pb_dict[gene]:
num_peps_in_db += 1
frac_peps_in_db = num_peps_in_db / peps_in_gc
return 1, num_peps_in_db, frac_peps_in_db
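# --- Illustrative sketch (added for clarity; not part of the original script). ---
# Toy example of how get_pb_pep_coverage_stats counts peptides found (as substrings)
# in a gene's concatenated protein sequence; the sequences below are made up.
def _example_coverage():
    row = {'gene': 'GENE1', 'gc_peps': ['PEPTIDEK', 'MISSINGK'], 'peps_in_gc': 2}
    toy_db = {'GENE1': 'XXXPEPTIDEKYYY-ZZZ'}
    return get_pb_pep_coverage_stats(row, toy_db)  # -> (1, 1, 0.5)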
## add in info for pb 6frm
pb_6frm = defaultdict()
for rec in SeqIO.parse(pb_6frm_file, 'fasta'):
pb_6frm[rec.id] = str(rec.seq)
db = ('6frm', pb_6frm)
gc_gene[['in_{}'.format(db[0]),
'peps_in_{}'.format(db[0]),
'frac_peps_in_{}'.format(db[0])]] \
= gc_gene.apply(lambda x: get_pb_pep_coverage_stats(x, db[1]), axis=1, result_type='expand')
## add in info for orf refined (ben)
pb_refined = {}
for rec in SeqIO.parse(pb_refined_file, 'fasta'):
gene = rec.description.split('=')[1]
if gene not in pb_refined:
pb_refined[gene] = ''
pb_refined[gene] += '-' + str(rec.seq)
db = ('refined', pb_refined)
gc_gene[['in_{}'.format(db[0]),
'peps_in_{}'.format(db[0]),
'frac_peps_in_{}'.format(db[0])]] \
= gc_gene.apply(lambda x: get_pb_pep_coverage_stats(x, db[1]), axis=1, result_type='expand')
#%%
## add in info for best cpat orfs
df_best_orfs = pd.read_table(cpat_best_orf_file)
#%%
df_best_orfs = df_best_orfs[['ID', 'Hexamer']]
df_best_orfs.columns = ['orf', 'coding_score']
df_best_orfs['pb_acc'] = df_best_orfs['orf'].str.split('_').str[0]
df_best_orfs['gene'] = df_best_orfs['pb_acc'].map(pb_gene)
#%%
# load in cpat prot seqs
pb_seq = defaultdict()
for rec in SeqIO.parse(cpat_protein_sequence_file, 'fasta'):
pb_seq[rec.id] = str(rec.seq.translate())
# ...continued cpat best orf
df_best_orfs['prot_seq'] = df_best_orfs['orf'].map(pb_seq)
df_best_orf_grp = df_best_orfs[['gene', 'prot_seq']].groupby('gene')['prot_seq'].apply(lambda x: '-'.join(x)).reset_index()
pb_best = pd.Series(df_best_orf_grp.prot_seq.values, index=df_best_orf_grp.gene).to_dict()
db = ('cpat_best', pb_best)
gc_gene[['in_{}'.format(db[0]),
'peps_in_{}'.format(db[0]),
'frac_peps_in_{}'.format(db[0])]] \
= gc_gene.apply(lambda x: get_pb_pep_coverage_stats(x, db[1]), axis=1, result_type='expand')
## add in info for longest pb orf
cpat = pd.read_table(cpat_all_orfs_file)
cpat['pb_acc'] = cpat['ID'].str.split('_').str[0]
cpat = cpat.loc[cpat.groupby('pb_acc')['ORF'].idxmax()][['pb_acc', 'ID']]
cpat.columns = ['pb_acc', 'orf']
cpat['gene'] = cpat['pb_acc'].map(pb_gene)
cpat['prot_seq'] = cpat['orf'].map(pb_seq)
cpat = cpat[['gene', 'prot_seq']].groupby('gene')['prot_seq'].apply(lambda x: '-'.join(x)).reset_index()
pb_long = | pd.Series(cpat.prot_seq.values, index=cpat.gene) | pandas.Series |
import pandas as pd
def putdateon(df):
"""Puts a date on the dataframe, and the year."""
return (
df
.assign(release_date = pd.to_datetime(df.release_date))
.pipe(lambda x: x.assign(year = x.release_date.dt.year))
)
movies = putdateon(pd.read_csv('~/data/tmdb/movies.csv'))
cast = pd.read_csv('~/data/tmdb/cast.csv')
crew = | pd.read_csv('~/data/tmdb/crew.csv') | pandas.read_csv |
import logging
import copy
import pandas as pd
import numpy as np
from datetime import date
from spaceone.core import cache
from spaceone.core.manager import BaseManager
from spaceone.cost_analysis.error import *
from spaceone.cost_analysis.manager.identity_manager import IdentityManager
from spaceone.cost_analysis.model.cost_model import Cost, MonthlyCost, CostQueryHistory
from spaceone.cost_analysis.manager.data_source_rule_manager import DataSourceRuleManager
from spaceone.cost_analysis.manager.exchange_rate_manager import ExchangeRateManager
_LOGGER = logging.getLogger(__name__)
class CostManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cost_model: Cost = self.locator.get_model('Cost')
self.monthly_cost_model: MonthlyCost = self.locator.get_model('MonthlyCost')
self.data_source_rule_mgr: DataSourceRuleManager = self.locator.get_manager('DataSourceRuleManager')
self.exchange_rate_map = None
def create_cost(self, params, execute_rollback=True):
def _rollback(cost_vo):
_LOGGER.info(f'[create_cost._rollback] '
f'Delete cost : {cost_vo.name} '
f'({cost_vo.cost_id})')
cost_vo.delete()
if 'region_code' in params and 'provider' in params:
params['region_key'] = f'{params["provider"]}.{params["region_code"]}'
if 'usd_cost' not in params:
original_currency = params['original_currency']
original_cost = params['original_cost']
if original_currency == 'USD':
params['usd_cost'] = params['original_cost']
else:
self._load_exchange_rate_map(params['domain_id'])
rate = self.exchange_rate_map.get(original_currency)
if rate is None:
raise ERROR_UNSUPPORTED_CURRENCY(supported_currency=list(self.exchange_rate_map.keys()))
params['usd_cost'] = round(original_cost / rate, 15)
params['billed_year'] = params['billed_at'].strftime('%Y')
params['billed_month'] = params['billed_at'].strftime('%Y-%m')
params['billed_date'] = params['billed_at'].strftime('%Y-%m-%d')
params = self.data_source_rule_mgr.change_cost_data(params)
cost_vo: Cost = self.cost_model.create(params)
if execute_rollback:
self.transaction.add_rollback(_rollback, cost_vo)
return cost_vo
def create_monthly_cost(self, params):
return self.monthly_cost_model.create(params)
def delete_cost(self, cost_id, domain_id):
cost_vo: Cost = self.get_cost(cost_id, domain_id)
cost_vo.delete()
def get_cost(self, cost_id, domain_id, only=None):
return self.cost_model.get(cost_id=cost_id, domain_id=domain_id, only=only)
def filter_costs(self, **conditions):
return self.cost_model.filter(**conditions)
def list_costs(self, query={}):
return self.cost_model.query(**query)
def stat_costs(self, query):
return self.cost_model.stat(**query)
def list_monthly_costs(self, query={}):
return self.monthly_cost_model.query(**query)
def stat_monthly_costs(self, query):
return self.monthly_cost_model.stat(**query)
def list_cost_query_history(self, query={}):
history_model: CostQueryHistory = self.locator.get_model('CostQueryHistory')
return history_model.query(**query)
@cache.cacheable(key='stat-costs-history:{domain_id}:{query_hash}', expire=600)
def create_cost_query_history(self, query, query_hash, granularity, start, end, domain_id):
def _rollback(history_vo):
_LOGGER.info(f'[create_cost_query_history._rollback] Delete cost query history: {query_hash}')
history_vo.delete()
history_model: CostQueryHistory = self.locator.get_model('CostQueryHistory')
history_vos = history_model.filter(query_hash=query_hash, domain_id=domain_id)
if history_vos.count() == 0:
history_vo = history_model.create({
'query_hash': query_hash,
'query_options': copy.deepcopy(query),
'granularity': granularity,
'start': start,
'end': end,
'domain_id': domain_id
})
self.transaction.add_rollback(_rollback, history_vo)
else:
history_vos[0].update({
'start': start,
'end': end
})
@cache.cacheable(key='stat-costs:{domain_id}:{query_hash}', expire=3600 * 24)
def stat_costs_with_cache(self, query, query_hash, domain_id):
return self.stat_costs(query)
@cache.cacheable(key='stat-monthly-costs:{domain_id}:{query_hash}', expire=3600 * 24)
def stat_monthly_costs_with_cache(self, query, query_hash, domain_id):
return self.stat_monthly_costs(query)
@staticmethod
def remove_stat_cache(domain_id):
cache.delete_pattern(f'stat-costs:{domain_id}:*')
cache.delete_pattern(f'stat-monthly-costs:{domain_id}:*')
@staticmethod
def is_monthly_cost(granularity, start, end):
if granularity in ['ACCUMULATED', 'MONTHLY'] and start.day == 1 and end.day == 1:
return True
else:
return False
def add_date_range_filter(self, query, granularity, start: date, end: date):
query['filter'] = query.get('filter') or []
if self.is_monthly_cost(granularity, start, end):
query['filter'].append({
'k': 'billed_month',
'v': start.strftime('%Y-%m'),
'o': 'gte'
})
query['filter'].append({
'k': 'billed_month',
'v': end.strftime('%Y-%m'),
'o': 'lt'
})
else:
query['filter'].append({
'k': 'billed_date',
'v': start.strftime('%Y-%m-%d'),
'o': 'gte'
})
query['filter'].append({
'k': 'billed_date',
'v': end.strftime('%Y-%m-%d'),
'o': 'lt'
})
return query
@staticmethod
def get_date_ranges_between_start_end(start, end):
date_ranges = []
is_first_day = start.day == 1
is_last_day = end.day == 1
for ts in pd.date_range(start, end, freq='MS'):
dt = ts.date()
if dt != start and dt != end:
date_ranges.append(dt)
if is_first_day and is_last_day:
return [
{'start': start, 'end': end}
]
elif is_first_day and not is_last_day:
return [
{'start': start, 'end': date_ranges[-1]},
{'start': date_ranges[-1], 'end': end}
]
elif not is_first_day and is_last_day:
return [
{'start': start, 'end': date_ranges[0]},
{'start': date_ranges[0], 'end': end}
]
else:
return [
{'start': start, 'end': date_ranges[0]},
{'start': date_ranges[0], 'end': date_ranges[-1]},
{'start': date_ranges[-1], 'end': end}
]
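    # --- Illustrative sketch (added for clarity; not part of the original manager). ---
    # Shows how the splitting above aligns a query window on month boundaries when the
    # window starts and ends mid-month; the dates are assumptions for illustration only.
    @staticmethod
    def _example_date_range_split():
        ranges = CostManager.get_date_ranges_between_start_end(date(2021, 1, 15), date(2021, 3, 10))
        # -> [{'start': 2021-01-15, 'end': 2021-02-01},
        #     {'start': 2021-02-01, 'end': 2021-03-01},
        #     {'start': 2021-03-01, 'end': 2021-03-10}]
        return ranges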
def make_accumulated_query(self, group_by, limit, query_filter, include_others=False,
include_usage_quantity=False, has_project_group_id=False):
aggregate = [
{
'group': {
'keys': self._get_keys_from_group_by(group_by),
'fields': [
{
'key': 'usd_cost',
'name': 'usd_cost',
'operator': 'sum'
}
]
}
},
{
'sort': {
'key': 'usd_cost',
'desc': True
}
}
]
if include_usage_quantity:
aggregate[0]['group']['fields'].append({
'key': 'usage_quantity',
'name': 'usage_quantity',
'operator': 'sum'
})
if limit and include_others is False and has_project_group_id is False:
aggregate.append({'limit': limit})
return {
'aggregate': aggregate,
'filter': query_filter
}
def make_trend_query(self, granularity, group_by, limit, query_filter, include_others=False,
include_usage_quantity=False, has_project_group_id=False):
aggregate = [
{
'group': {
'keys': self._get_keys_from_group_by(group_by) + self._get_keys_from_granularity(granularity),
'fields': [
{
'key': 'usd_cost',
'name': 'usd_cost',
'operator': 'sum'
}
]
}
},
{
'group': {
'keys': self._get_keys_from_group_by(group_by, change_key=True),
'fields': [
{
'key': 'usd_cost',
'name': 'total_usd_cost',
'operator': 'sum'
},
{
'name': 'usd_cost_values',
'operator': 'push',
'fields': [
{
'key': 'date',
'name': 'k'
},
{
'key': 'usd_cost',
'name': 'v'
}
]
}
]
}
},
{
'sort': {
'key': 'total_usd_cost',
'desc': True
}
}
]
if include_usage_quantity:
aggregate[0]['group']['fields'].append({
'key': 'usage_quantity',
'name': 'usage_quantity',
'operator': 'sum'
})
aggregate[1]['group']['fields'] += [
{
'key': 'usage_quantity',
'name': 'total_usage_quantity',
'operator': 'sum'
},
{
'name': 'usage_quantity_values',
'operator': 'push',
'fields': [
{
'key': 'date',
'name': 'k'
},
{
'key': 'usage_quantity',
'name': 'v'
}
]
}
]
if limit and include_others is False and has_project_group_id is False:
aggregate.append({'limit': limit})
aggregate.append({
'project': {
'fields': [
{
'key': 'total_usd_cost',
'name': 'total_usd_cost'
},
{
'key': 'usd_cost_values',
'name': 'usd_cost',
'operator': 'array_to_object'
}
]
}
})
if include_usage_quantity:
aggregate[-1]['project']['fields'] += [
{
'key': 'total_usage_quantity',
'name': 'total_usage_quantity'
},
{
'key': 'usage_quantity_values',
'name': 'usage_quantity',
'operator': 'array_to_object'
}
]
return {
'aggregate': aggregate,
'filter': query_filter
}
def sum_costs_by_project_group(self, response, granularity, group_by, domain_id, include_usage_quantity=False):
has_project_id = 'project_id' in group_by
cost_keys = list(set(group_by[:] + ['project_id', 'usd_cost']))
if include_usage_quantity:
cost_keys.append('usage_quantity')
if granularity != 'ACCUMULATED':
cost_keys.append('total_usd_cost')
if include_usage_quantity:
cost_keys.append('total_usage_quantity')
cost_keys.remove('project_group_id')
results = response.get('results', [])
projects_info = self._get_projects_info(domain_id)
project_df = pd.DataFrame(projects_info, columns=['project_id', 'project_group_id'])
cost_df = pd.DataFrame(results, columns=cost_keys)
join_df = | pd.merge(cost_df, project_df, on=['project_id'], how='left') | pandas.merge |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import StandardScaler
import time
import timeit
start = timeit.default_timer()
t0 = time.process_time()  # time.clock() was removed in Python 3.8
#file read
DS = | pd.read_csv('diabetes.csv') | pandas.read_csv |
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
import numpy as np
import pandas as pd
import warnings
from scipy import stats
from scipy.stats import entropy
T = 50
class Predictor:
f = None
def __init__(self, model):
# build the stochastic forward-pass function used by predict_with_uncertainty
self.f = K.function([model.layers[0].input, K.learning_phase()], [model.layers[-1].output])
self.model = model
def predict_with_uncertainty(self, x, n_iter, dropout):
predictions = []
for i in range(0, n_iter):
predictions.append(self.f([x, dropout]))
return np.array(predictions)
def predict(self, x):
#predictions = self.f([x, False])
predictions = [self.model.predict(x)]
return np.array(predictions)
class EnsamblePredictor:
f_assemble = None
def __init__(self, models):
self.f_assemble = [K.function([m.layers[0].input, K.learning_phase()], [m.layers[-1].output]) for m in models]
def predict(self, x):
predictions = []
for f in self.f_assemble:
predictions.append(f([x, False]))
return np.array(predictions)
## predict
def _addPredictions(df, mean_prob, y_val, onehot=True):
if not onehot:
y_val = to_categorical(y_val)
df['y_hat'] = y_val.argmax(axis=1).flatten()
if len(y_val[0]) == 2:
df['p_0'] = mean_prob[:,:, :1].flatten()
df['p_1'] = 1 - df['p_0']
df['p'] = mean_prob.max(axis=2).flatten()
df['y'] = mean_prob.argmax(axis=2).flatten()
df['defect'] = df['y_hat'] != df['y']
return df
def _addSimpleScores(df, prob, mean_prob, shape):
# Defect Eval.
df['c_false'] = np.where(df['defect'], 1, 0)
df['p_defect'] = np.where(df['defect'], -1*(1-df['p']), -1*df['p'] )
df['c_true'] = np.where(df['defect'], 0, 1)
df['p_correct'] = np.where(df['defect'] == False, df['p'], 1-df['p'] )
# Least Coeficient
df['u_lc'] = 1 - df['p']
# Highest Margin
u_diff = []
for i in range(shape):
p = mean_prob[:, i][0]
s = sorted(list(p))[-2:]
u = 1 - (s[-1] - s[0])
u_diff.append(u)
df['u_hm'] = u_diff
# Entropy
df['u_e'] = entropy(mean_prob.T, base=2).flatten()
return df
def _addAdvancedScores(df, prob, mean_prob, var_prob):
# Variance
df['u_var'] = var_prob.mean(axis=2).flatten()
# Mutual Information (BALD)
def f(x) :
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return x * np.where(x != 0, np.log2(x), 0)
bald = np.apply_along_axis(f, 0, prob).sum(axis=0).sum(axis=2) / prob.shape[0]
df['u_bald'] = (df['u_e'] + bald[0])
# Variation Ratio
vr = []
for i in range(prob.shape[2]): #N
vr_i = []
for j in range(prob.shape[0]):
p = prob[j, 0, i]
arg = p.argmax(axis=0)
vr_i.append(arg)
vr.append(1 - (vr_i.count(stats.mode(vr_i)[0][0]) / len(vr_i)))
df['u_vr'] = vr
# Uncertainty Kwon
epis = np.mean(prob**2, axis=0) - np.mean(prob, axis=0)**2
df['u_ea'] = np.mean(epis + np.mean(prob*(1-prob), axis=0), axis=2).reshape(prob.shape[2])
def f(x):
diag = np.diag(x)
outer = np.outer(x, x.T)
diag = np.diag((diag - outer))
return diag
a = np.apply_along_axis(f, 0, prob)
b = epis
df['u_ea2'] = (((a.mean(axis=3) + b.mean(axis=2)).mean(axis=0)).reshape(prob.shape[2]))
return df
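# --- Illustrative sketch (added for clarity; not part of the original script). ---
# The BALD score above is the mutual information: the entropy of the averaged
# prediction minus the average entropy of the individual stochastic forward passes.
# The helper below computes it directly for a single sample; `probs` is assumed to
# have shape (T, n_classes), one softmax output per forward pass.
def _example_bald_single_sample(probs):
    probs = np.asarray(probs, dtype=float)
    mean_p = probs.mean(axis=0)
    predictive_entropy = entropy(mean_p, base=2)
    expected_entropy = np.mean([entropy(p, base=2) for p in probs])
    return predictive_entropy - expected_entropy  # large when the passes disagree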
# util
def predict(model, X_val, y_val):
model_pred = Predictor(model)
prob = model_pred.predict(X_val)
print(prob.shape)
df = pd.DataFrame()
df = _addPredictions(df, prob, y_val)
df = _addSimpleScores(df, prob, prob, prob.shape[1])
return df
def predict_bbb(model, X_val, y_val):
model_pred = Predictor(model)
prob = model_pred.predict_with_uncertainty(X_val, T, dropout=False)
print(prob.shape)
df = pd.DataFrame()
mean_prob = prob.mean(axis=0)
var_prob = prob.var(axis=0)
df = _addPredictions(df, mean_prob, y_val)
df = _addSimpleScores(df, prob, mean_prob, prob.shape[2])
df = _addAdvancedScores(df, prob, mean_prob, var_prob)
return df
def predict_mcdropout(model, X_val, y_val):
model_pred = Predictor(model)
prob = model_pred.predict_with_uncertainty(X_val, T, dropout=True)
print(prob.shape)
df = pd.DataFrame()
mean_prob = prob.mean(axis=0)
var_prob = prob.var(axis=0)
df = _addPredictions(df, mean_prob, y_val)
df = _addSimpleScores(df, prob, mean_prob, prob.shape[2])
df = _addAdvancedScores(df, prob, mean_prob, var_prob)
return df
def predict_ensamble(models, X_val, y_val):
model_pred = EnsamblePredictor(models)
prob = model_pred.predict(X_val)
print(prob.shape)
df = | pd.DataFrame() | pandas.DataFrame |
# This file is part of the mt5se package
# mt5se home: https://github.com/paulo-al-castro/mt5se
# Author: <NAME>
# Date: 2020-11-17
## calls that are sent to the brokerage, and therefore require communication with it
# broker module
import MetaTrader5 as mt5
import pandas as pd
import numpy as np
import random
from math import *
from datetime import datetime
from datetime import timedelta
# we import the pytz module to work with time zones
import pytz
from pytz import timezone
sptz=pytz.timezone('Brazil/East')
etctz=pytz.timezone('etc/utc') # os tempos sao armazenados na timezone ETC (Greenwich sem horario de verao)
path=None # Metatrader program file path
datapath=None # Metatrader path to data folder
commonDatapath=None # Metatrader common data path
company=None #broker name
platform=None # digital plataform (M)
"""
login, // account number (TODO)
password="PASSWORD", // password
server="SERVER", // server name as defined in the terminal
timeout=TIMEOUT // timeout
"""
def connect( ):
#if not se.connect():
#print("Error on connection", se.last_error())
#exit()
res= mt5.initialize()
if res:
ac=mt5.terminal_info()
path=ac.path
datapath=ac.data_path
commonDatapath=ac.commondata_path
company=ac.company
platform=ac.name
return res
def accountInfo():
#acc=se.accountInfo() # it returns a dictionary
#acc['login'] # Account id
#acc['balance'] # Account balance in the deposit currency
# acc['equity'] # Account equity in the deposit currency
#acc['margin'] #Account margin used in the deposit currency
#acc['margin_free'] # Free margin of an account in the deposit currency
#acc['assets'] # The current assets of an account
# acc['name'] #Client name
# acc['server'] # Trade server name
# acc['currency'] # Account currency, BRL for Brazilian Real
account_info = mt5.account_info()
#print("account info")
return account_info
"""
returns the current number of assets of the given symbol
"""
def getShares(symbolId):
pos= mt5.positions_get(symbol=symbolId)
if pos!=None and pos!=():
d=pos[0]._asdict()
return d['volume']
else:
return 0
return pos['volume']
"""
It returns whether the market is open or not for new orders.
Note that markets can close at different times for different assets, therefore
you need to inform the target asset. The default target asset is the se stock.
If there is no tick for 60 seconds, the market is considered closed!
"""
def isMarketOpen(asset='seSA3'):
# si=mt5.symbol_info(asset)
# if si!=None:
# if si.trade_mode==mt5.SYMBOL_TRADE_MODE_FULL: # it does not work in XP/se (always True)
# return True
# else:
# return False
# return False
t_secs=mt5.symbol_info_tick(asset).time # time in seconds
now_dt=datetime.now(etctz)+timedelta(hours=-3)
last_tick_dt=datetime.fromtimestamp(t_secs,etctz)
#print(last_tick_dt)
#print(now_dt)
if now_dt>last_tick_dt+timedelta(seconds=60):
return False
else:
return True
"""
It returns whether the market is still open but just for closing orders.
Note that markets can close at different times for different assets, therefore
you need to inform the target asset. The default target asset is the se stock.
"""
#def isMarketClosing(asset='seSA3'): # it does not work in XP/se (always false)
# si=mt5.symbol_info(asset)
# if si!=None:
# if si.trade_mode==mt5.SYMBOL_TRADE_MODE_CLOSEONLY:
# return True
# else:
# return False
# return False
"""
Returns the max volume of shares that you can buy with your free margin.
It also observes the volume step (a.k.a. the minimum number of shares you can trade).
"""
def getAfforShares(assetId,money=None,price=None):
if money==None:
money=mt5.account_info().margin_free
if price==None:
close=mt5.symbol_info_tick(assetId).last
else:
close=price
step=mt5.symbol_info(assetId).volume_step
free=0
while free*close<money:
free=free+step
return free-step
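# --- Illustrative sketch (added for clarity; not part of the original module). ---
# getAfforShares above walks up in volume-step increments until the cost exceeds the
# available money, then steps back once. This pure-Python helper mirrors that
# arithmetic without calling MetaTrader; the numbers are assumptions for illustration.
def _example_affordable_shares(money=10000.0, price=23.50, step=100):
    free = 0
    while free * price < money:
        free = free + step
    return free - step  # 400 for the example values (400 * 23.50 = 9400 <= 10000)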
def getSharesStep(assetId,money=None):
return mt5.symbol_info(assetId).volume_step
def sendOrder(order):
if order==None:
return False
# send the trade request
result = mt5.order_send(order)
if result.retcode != mt5.TRADE_RETCODE_DONE: # if error
print("Sent order failed < {} > retcode={}".format(result.comment,result.retcode))
# solicitamos o resultado na forma de dicionário e exibimos elemento por elemento
dic=result._asdict()
setLastError(dic['comment'])
# for field in dic.keys():
# print(" {}={}".format(field,dic[field]))
# # if this is a trade request structure, also display it element by element
# if field=="request":
# traderequest_dict=dic[field]._asdict()
# for tradereq_filed in traderequest_dict:
# print(" traderequest: {}={}".format(tradereq_filed,traderequest_dict[tradereq_filed]))
return False
else:
return True
def cancelOrder(o):# TO DO
# action= TRADE_ACTION_REMOVE
print("To do....")
def numOrders(): #returns the number of active orders
result=mt5.orders_total()
if result==None:
setLastError("Error on getting orders total")
return -1
else:
return result
#order fields description:
#order_id | buy_sell | volume | price | sl | tp |
#ticket | time_setup time_setup_msc time_expiration type type_time type_filling state magic
# volume_current price_open sl tp price_current symbol comment external_id
# ulong magic; // Expert Advisor ID (magic number)
# ulong order; // Order ticket
#string symbol; // Trading symbol
# double volume; // Requested order volume in lots
# double price; // Price
# double stoplimit; // StopLimit level of the order
# double sl; // Stop Loss level of the order
# double tp; // Take Profit level of the order
# ulong deviation; // Maximum possible deviation from the requested price
# ENUM_ORDER_TYPE type; // Order type
# ORDER_TYPE_BUY Market Buy order
# ORDER_TYPE_SELL Market Sell order
# ORDER_TYPE_BUY_LIMIT Buy Limit pending order
# ORDER_TYPE_SELL_LIMIT Sell Limit pending order
# ORDER_TYPE_BUY_STOP Buy Stop pending order
# ORDER_TYPE_SELL_STOP Sell Stop pending order
# ORDER_TYPE_BUY_STOP_LIMIT Upon reaching the order price, a Buy Limit pending order is placed at the StopLimit price
# ORDER_TYPE_SELL_STOP_LIMIT Upon reaching the order price, a Sell Limit pending order is placed at the StopLimit price
# ORDER_TYPE_CLOSE_BY Order to close the opposite position
# ENUM_ORDER_TYPE_FILLING type_filling; // Order filling (execution) type
#ORDER_FILLING_FOK This filling policy means that an order can be filled only in the specified amount. If the desired amount of the asset is not available in the market, the order will not be executed.
# ENUM_ORDER_TYPE_TIME type_time; // Order expiration type
# ORDER_TIME_DAY Order valid until the end of the current trading day
# datetime expiration; // Order expiration time (for orders of type ORDER_TIME_SPECIFIED)
# string comment; // Order comment
# ulong position; // Position ticket
# ulong position_by; // Ticket of an opposite position
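# --- Illustrative sketch (added for clarity; not part of the original module). ---
# A minimal market-buy request dictionary using the fields documented above, in the
# form accepted by sendOrder()/mt5.order_send(). The symbol, volume and magic number
# are assumptions for illustration only.
def _example_buy_order_request(symbol='seSA3', volume=100.0):
    tick = mt5.symbol_info_tick(symbol)
    return {
        'action': mt5.TRADE_ACTION_DEAL,        # immediate (market) execution
        'symbol': symbol,
        'volume': volume,
        'type': mt5.ORDER_TYPE_BUY,
        'price': tick.ask,
        'magic': 1001,                          # arbitrary Expert Advisor id
        'type_time': mt5.ORDER_TIME_DAY,        # valid until the end of the trading day
        'type_filling': mt5.ORDER_FILLING_FOK,  # fill-or-kill
        'comment': 'example order',
    }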
def getOrders(): # returns a dataframe with all active orders
orders=mt5.orders_get()
if orders == None or len(orders)==0:
print("No orders, error code={}".format(mt5.last_error()))
return None
else:
print("Total orders:",len(orders))
df=pd.DataFrame(list(orders),columns=orders[0]._asdict().keys())
return df
def getDailYBars(symbol, start,end=None): # bars with opening time <= end are included.
# we define the timezone as UTC
#timezone = pytz.timezone("Etc/UTC")
if end==None:
end=datetime.now()
if type(start).__name__!='datetime':
if type(start).__name__!='int':
print('Error, start should be a datetime from package datetime or int')
else:
start_day=datetime.now() #- timedelta(days=start)
rates=mt5.copy_rates_from(symbol,mt5.TIMEFRAME_D1,start_day,start)
# create a DataFrame from the obtained data
rates_frame=pd.DataFrame(rates)
rates_frame['time']=pd.to_datetime(rates_frame['time'], unit='s')
return rates_frame
else:
rates=mt5.copy_rates_range(symbol,mt5.TIMEFRAME_D1,start,end)
        # build a DataFrame from the returned rates
rates_frame= | pd.DataFrame(rates) | pandas.DataFrame |
"""
This script does the following:
Loads various word2vec models with different hyperparameters, then obtains the word embeddings for common words
in its vocabulary (>5 frequency). Then performs KMeans clustering with N=3 clusters on the word vectors.
Finally performs PCA (Principal Component Analysis) on the word vectors and projects them into a 2D plane so we
can visualize the plots. The plots come with color coded words too indicating the cluster assigned to that word.
------------------------------------------------------------NOTE--------------------------------------------------
Essentially tasks that were to be done in mat_sensitivity.py, mat_plot.py and mat_cool.py are all done in one script.
@ Author: <NAME>
"""
import gensim
from gensim.models import Word2Vec
import warnings
import pickle
from nltk.cluster import KMeansClusterer
import nltk
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
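# --- Illustrative sketch only (not part of the original script) ---
# The docstring above describes projecting the word vectors onto a 2D plane with
# PCA and color coding each point by its KMeans cluster. A minimal version of
# that idea might look like the helper below (it assumes scikit-learn is
# available alongside gensim/nltk; it is not called anywhere in this script).
def pca_cluster_plot_sketch(vectors, cluster_labels):
    """Project word vectors to 2D with PCA and color points by cluster."""
    from sklearn.decomposition import PCA  # assumed dependency, not imported by the original script
    coords = PCA(n_components=2).fit_transform(vectors)
    plt.scatter(coords[:, 0], coords[:, 1], c=cluster_labels)
    plt.show()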
s = [r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\default_size",
r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\size_100",
r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\size_300",
r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\size_400",
r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\window_5",
r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\window_10",
r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\other_models\window_15"]
warnings.simplefilter("ignore", np.ComplexWarning)
# with open(r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\model_example_phraser.pkl", 'rb') as f:
# data = pickle.load(f)
# print(data)
# with open(r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\model_example_accuracies.pkl", 'rb') as f:
# acc_data = pickle.load(f)
# print(acc_data)
# pretrained_model = r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\pretrained_embeddings"
# model = Word2Vec.load(pretrained_model)
# with open(r"C:\Users\212803971\Documents\A-course\software_proj\fresh_start\mat2vec\mat2vec\training\models\model_example_loss.pkl", 'rb') as f:
# loss_data = pickle.load(f)
# print(loss_data)
def plot_word_embeddings(model):
print("The model vocab is as follows ---------------------------------------------")
print(list(model.wv.vocab))
X = model[model.wv.vocab]
NUM_CLUSTERS=3
    kclusterer = KMeansClusterer(NUM_CLUSTERS, distance=nltk.cluster.util.cosine_distance, repeats=25)
    assigned_clusters = kclusterer.cluster(X, assign_clusters=True)
    print(assigned_clusters)
    labels = np.array(assigned_clusters)
    assigned_0 = np.where(labels == 0, 1, 0)
    assigned_1 = np.where(labels == 1, 1, 0)
    assigned_2 = np.where(labels == 2, 1, 0)
# PCA
df = | pd.DataFrame(X) | pandas.DataFrame |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
        # s3 = make_dtnat_arr(chunksize + 5, 0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
                # read_csv disambiguates the columns by
                # labeling them dupe.1, dupe.2, etc. Monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
        # gh-11637: incorrect output when a mix of integer and string column
        # names is passed as the columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = | read_csv(path, header=[0, 1], index_col=[0]) | pandas.read_csv |
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(NullFrequencyError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other - td, expected)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta( | Series(['00:00:02']) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 11:05:09 2018
@author: abaena
"""
#******************************************************************************
#Add logmapper-agent directory to python path for module execution
#******************************************************************************
if __name__ == '__main__':
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..','..')))
#******************************************************************************
import os
import logging
import datetime
import pandas as pd
import numpy as np
import config.config as cfg
import logmappercommon.utils.postgres_util as db
import logmappercommon.utils.logmapper_util as lmutil
import logmappermaster.dao.master_dao as masterdao
import logmappercommon.definitions.logmapperkeys as lmkey
#%%
"""
Global Initialization. Constants definitions.
"""
logger = logging.getLogger(__name__)
EMPTY_CHARACTER = np.nan #'?'
EMPTY_VALUE=0
#%%
"""
*******************************************************************************
CREATES PANDAS-STYLE DATA WITH A MATRIX FOR FILE PROCESSING
*******************************************************************************
"""
#%%
"""
*******************************************************************************
*******************************************************************************
"""
def arrangeColumns(conn):
cursor = conn.cursor()
df = | pd.read_sql_query("SELECT * FROM lmp_measure_type", connDbMaster) | pandas.read_sql_query |
from datetime import datetime, timedelta, timezone
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
from athenian.api.controllers.features.entries import MetricEntriesCalculator
from athenian.api.controllers.features.github.deployment_metrics import \
group_deployments_by_environments, group_deployments_by_participants, \
group_deployments_by_repositories
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.types import ReleaseParticipationKind
from athenian.api.controllers.settings import LogicalRepositorySettings
from athenian.api.defer import wait_deferred, with_defer
from athenian.api.models.persistentdata.models import DeployedComponent
from athenian.api.models.web import DeploymentMetricID
@pytest.fixture(scope="module")
def sample_deps() -> pd.DataFrame:
rnid = DeployedComponent.repository_node_id.name
return pd.DataFrame.from_dict({
"components": [pd.DataFrame([{rnid: 1}, {rnid: 2}]),
pd.DataFrame([{rnid: 3}, {rnid: 1}]),
pd.DataFrame([{rnid: 3}, {rnid: 2}]),
pd.DataFrame([{rnid: 1}]),
pd.DataFrame([{rnid: 3}])],
"pr_authors": [[1, 2, 3], [1, 4, 5], [2, 4, 6], [], [3]],
"commit_authors": [[1, 2, 3], [1, 4, 5, 6], [2, 4, 6], [7], [3]],
"release_authors": [[], [], [1, 2], [], [7]],
"environment": ["1", "2", "1", "3", "3"],
})
def test_group_deployments_by_repositories_smoke(sample_deps):
assert_array_equal(group_deployments_by_repositories([[1, 2], [2, 3]], sample_deps),
[[0, 1, 2, 3], [0, 1, 2, 4]])
assert_array_equal(
[arr.tolist() for arr in group_deployments_by_repositories(
[[1], [2], [1, 2]], sample_deps)],
[[0, 1, 3], [0, 2], [0, 1, 2, 3]])
assert_array_equal(group_deployments_by_repositories([], sample_deps),
[np.arange(len(sample_deps))])
assert_array_equal(group_deployments_by_repositories([[1, 2], [2, 3]], | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = | isnull([[False]]) | pandas.core.dtypes.missing.isnull |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 14:59:45 2020
@author: wonwoo
"""
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVR
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_squared_log_error
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.utils import class_weight, resample
from keras.models import Sequential
from keras.layers import Dense, Dropout, GaussianNoise, Conv1D
from keras.layers import LSTM
from keras.layers import Conv2D, MaxPooling2D, TimeDistributed
from keras.layers import Dense, Dropout, BatchNormalization
from keras.layers import Flatten, Reshape
from keras.layers import Embedding, Input
from keras.models import Sequential
from keras.models import load_model
from keras import optimizers
from keras.regularizers import L1L2
from keras.callbacks import ModelCheckpoint
from keras.utils import to_categorical, np_utils
import keras.backend as K
import tensorflow as tf
import numpy as np
import math
import pandas as pd
import os
import gc
import matplotlib.pyplot as plt
from xgboost import XGBClassifier
scaler = StandardScaler()
def focal_loss_fixed(y_true, y_pred, gamma=2., alpha=.25):
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
return (-K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))
- K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0)))
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def resampling(df_train):
train_freq = df_train['failure'].value_counts()
print(train_freq)
train_freq_mean = train_freq[1]
    # Under- & over-sampling per failure class
df_list = []
target_max = 2
multiple = 10
for i in range(0, target_max):
df_list.append(df_train[df_train['failure']==i])
for i in range(0, target_max):
if i==0:
df_list[i] = df_list[i].sample(n=int(train_freq_mean*multiple), random_state=123, replace=True)
else:
df_list[i] = df_list[i].sample(n=train_freq_mean, random_state=123, replace=True)
df_sampling_train = pd.concat(df_list)
train_freq = df_sampling_train['failure'].value_counts()
return pd.DataFrame(df_sampling_train)
def DNN(train, valid, test):
X_train = train.drop(['datetime', 'failure'], axis=1)
X_valid = valid.drop(['datetime', 'failure'], axis=1)
X_test = test.drop(['datetime', 'failure'], axis=1)
gc.collect()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
X_test = scaler.fit_transform(X_test)
Y_train = train['failure']
Y_valid = valid['failure']
Y_test = test['failure']
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight(None, np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
#d_class_weights = {0:1.0, 1:1.0}
optimizer=optimizers.Adam()
Y_train = to_categorical(Y_train)
Y_valid = to_categorical(Y_valid)
Y_test = to_categorical(Y_test)
model=Sequential()
model.add(Dense(32, input_dim=19, kernel_initializer='glorot_normal',
bias_initializer='glorot_normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, kernel_initializer='glorot_normal',
bias_initializer='glorot_normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, kernel_initializer='glorot_normal',
bias_initializer='glorot_normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, kernel_initializer='glorot_normal',
bias_initializer='glorot_normal', activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(32, kernel_initializer='glorot_normal',
bias_initializer='glorot_normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, kernel_initializer='glorot_normal',
bias_initializer='glorot_normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', f1_m])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=100,
verbose=1, class_weight = d_class_weights,
validation_data=(X_valid, Y_valid), shuffle=True)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
x_epochs = range(1, len(train_loss) + 1)
plt.plot(x_epochs, train_loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_test, Y_test, batch_size=1000)
print(score)
Y_pred = model.predict(X_test)
Y_pred = np.argmax(Y_pred, axis=1)
Y_test = np.argmax(Y_test, axis=1)
#Y_pred = np.argmax(Y_pred, axis=1).reshape(-1,1)
#Y_test = np.argmax(Y_test, axis=1).reshape(-1,1)
print(confusion_matrix(Y_test, Y_pred))
print(classification_report(Y_test, Y_pred, labels=[0, 1]))
def XGBClss(train, valid, test):
X_train = train.drop(['datetime', 'failure'], axis=1)
X_valid = valid.drop(['datetime', 'failure'], axis=1)
X_test = test.drop(['datetime', 'failure'], axis=1)
gc.collect()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
X_test = scaler.fit_transform(X_test)
Y_train = train['failure']
Y_valid = valid['failure']
Y_test = test['failure']
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
clf = XGBClassifier()
parameters = {
"n_estimator" : [100, 200, 300],
"max_depth" : [ 3, 4, 5 ],
"tree_method" : ['gpu_hist'],
"predictor" : ['gpu_predictor']
}
grid = GridSearchCV(clf,
parameters, n_jobs=1,
scoring="f1_micro",
cv=3)
grid.fit(X_train, Y_train)
print(grid.best_score_)
print(grid.best_params_)
model = grid.best_estimator_
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
print(confusion_matrix(Y_test, Y_pred))
    print(classification_report(Y_test, Y_pred, labels=[0, 1]))
if __name__=="__main__":
data_type={'machineID':'uint8',
'datetime':'str',
'voltmean_24h':'float32',
'rotatemean_24h':'float32',
'pressuremean_24h':'float32',
'vibrationmean_24h':'float32',
'voltsd_24h':'float32',
'rotatesd_24h':'float32',
'pressuresd_24h':'float32',
'vibrationsd_24h':'float32',
'error1count':'uint8',
'error2count':'uint8',
'error3count':'uint8',
'error4count':'uint8',
'error5count':'uint8',
'comp1':'float32',
'comp2':'float32',
'comp3':'float32',
'comp4':'float32',
'model':'uint8',
'age':'uint8',
'failure':'str'}
features_path = os.path.join("data/labeled_features.csv")
df_data = pd.read_csv(features_path, engine='c',
dtype=data_type, parse_dates=['datetime'])
    df_data['datetime'] = df_data['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
test_results = []
models = []
df_data['failure']=np.where(df_data['failure']=='none',0,1)
df_data['failure']=df_data['failure'].astype('int')
del df_data['machineID']
gc.collect()
# make test and training splits
test_date = pd.to_datetime('2015-10-01 00:00:00')
test_data = pd.DataFrame(df_data.loc[pd.to_datetime(df_data['datetime']) >= test_date])
train_data = pd.DataFrame(df_data.loc[pd.to_datetime(df_data['datetime']) < test_date])
validation_date = | pd.to_datetime('2015-8-01 01:00:00') | pandas.to_datetime |
import sqlite3
import json
import pandas as pd
class MamphiDataFetcher:
mamphi_db = ""
def __init__(self, mamphi_db=mamphi_db):
self.mamphi_db = mamphi_db
def fetch_center(self):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def fetch_consent(self):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Informed_consent"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def fetch_rand_week(self, week):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Random_Woche_{}".format(week)
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def get_center_by_land(self, land):
if land == "Germany":
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren WHERE Land = 'D'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
german_center = json.dumps([dict(ix) for ix in results])
# number_patient = len(list_patient)
return german_center
elif land == "UK":
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren WHERE Land = 'GB'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
uk_center = json.dumps([dict(ix) for ix in results])
# number_patient = len(list_patient)
return uk_center
def update_zentren(self, center_json):
"""
        :rtype: None
        :param center_json: JSON string with the values of the new centre to be inserted into the database
"""
center = json.loads(center_json)
# Compute center Id manually
values = []
if center['Land'] == "D":
german_center = self.get_center_list_country(country="D")
german_center = pd.read_json(german_center)
zentrum_id = german_center['Zentrum_Id'].max() + 1
values.append(zentrum_id)
else:
uk_center = self.get_center_list_country(country="GB")
uk_center = pd.read_json(uk_center)
zentrum_id = uk_center['Zentrum_Id'].max() + 1
values.append(zentrum_id)
for idx in center.values():
values.append(idx)
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "INSERT INTO Zentren VALUES" + str(tuple(values))
try:
cursor.execute(statement)
conn.commit()
print(cursor.rowcount, "record inserted.")
except:
conn.rollback()
conn.close()
def update_consent(self, consent_json):
consent_item = json.loads(consent_json)
values = []
# compute patient id manually
consent_list = self.fetch_consent()
consent_list = pd.read_json(consent_list)
patient_id = consent_list['Patient_Id'].max() + 1
values.append(patient_id)
for idx in consent_item.values():
values.append(idx)
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "INSERT INTO Informed_consent VALUES" + str(tuple(values))
try:
cursor.execute(statement)
conn.commit()
print(cursor.rowcount, "record inserted.")
except:
conn.rollback()
conn.close()
def update_rand_week(self, value, week):
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "INSERT INTO Random_Week_{}".format(week) + value
try:
cursor.execute(statement)
conn.commit()
print(cursor.rowcount, "record inserted.")
except:
conn.rollback()
conn.close()
def get_center_list_country(self, country):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
statement = "SELECT * FROM Zentren WHERE Land = 'D'" if country == "Deutschland" \
else "SELECT * FROM Zentren WHERE Land = 'GB'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
# results_json = json.dumps([dict(ix) for ix in results])
list_patient = json.dumps([dict(ix) for ix in results])
# number_patient = len(list_patient)
return list_patient
def fetch_consent_list(self, consent):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
if consent is "missing":
statement = "SELECT * FROM Informed_consent WHERE Einwilligung = 'nan' AND Datum != 'NaT'"
elif consent is "incomplete":
statement = "SELECT * FROM Informed_consent WHERE Einwilligung = 'nan'"
else:
statement = "SELECT * FROM Informed_consent WHERE Datum > '2019.06.03'"
cursor.execute(statement)
results = cursor.fetchall()
conn.commit()
conn.close()
results_json = json.dumps([dict(ix) for ix in results])
return results_json
def get_number_of_patient_per_center_by_week(self, week):
results = self.fetch_rand_week(week=week)
data = pd.read_json(results)
number_patient_per_center = data.groupby(['Zentrum'])['Patient_Id'].count()
center = [idx for idx in number_patient_per_center.index]
number_of_patient = [value for value in number_patient_per_center.values]
df = pd.DataFrame({'Zentrum': center, 'Number_Of_Patient': number_of_patient})
weekly_list = df.to_json(orient='records')
return weekly_list
def get_number_patient_per_center_per_country_by_week(self, week):
"""
:return: Return list of patient per center in both country
"""
weekly_list = self.get_number_of_patient_per_center_by_week(week=week)
load_list = json.loads(weekly_list)
list_german = []
list_uk = []
for el in load_list:
if el['Zentrum'] < 200:
list_german.append(el)
else:
list_uk.append(el)
results = {'Germany': list_german, 'UK': list_uk}
return json.dumps(results)
def fetch_center_ids(self):
conn = sqlite3.connect(self.mamphi_db)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
query = "SELECT Zentrum_Id FROM Zentren"
response = cursor.execute(query)
results = response.fetchall()
center_ids = [dict(idx) for idx in results]
return json.dumps(center_ids)
def remove_center_by_id(self, center_id):
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "DELETE FROM Zentren WHERE Zentrum_Id = {}".format(center_id)
cursor.execute(statement)
conn.commit()
conn.close()
def remove_consent_by_id(self, patient_id):
conn = sqlite3.connect(self.mamphi_db)
cursor = conn.cursor()
statement = "DELETE FROM Informed_consent WHERE Patient_Id = {}".format(patient_id)
cursor.execute(statement)
conn.commit()
conn.close()
print("An item have been removed")
def retrieve_centres_with_number_of_patient(self):
week1 = self.get_number_of_patient_per_center_by_week(week=1)
week2 = self.get_number_of_patient_per_center_by_week(week=2)
week1_df = pd.read_json(week1)
week2_df = pd.read_json(week2)
sum_weekly_records = pd.concat([week1_df, week2_df], ignore_index=True)
records = sum_weekly_records.groupby(['Zentrum']).sum()
centres = self.fetch_center()
centres = pd.read_json(centres)
centres['NP'] = 0
for idx in records.index:
centres.loc[centres['Zentrum_Id'] == idx, 'NP'] = records['Number_Of_Patient'][idx]
return centres.to_json(orient='records')
def retrieve_monitoring_plan(self):
centres = self.retrieve_centres_with_number_of_patient()
data = json.loads(centres)
for item in data:
if 0 < item['NP'] < 5:
visites = | pd.date_range(start='6/1/2019', periods=5, freq='3M') | pandas.date_range |
"""Test functions in owid.datautils.dataframes module.
"""
import numpy as np
import pandas as pd
from pytest import warns
from typing import Any, Dict
from owid.datautils import dataframes
class TestCompareDataFrames:
def test_with_large_absolute_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_absolute_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_absolute_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3.1]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, False]}))
def test_with_large_relative_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.5,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_relative_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.3,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_relative_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.4,
).equals(pd.DataFrame({"col_01": [False, True]}))
def test_with_dataframes_of_equal_values_but_different_indexes(self):
# Even if dataframes are not identical, compare_dataframes should return all Trues (since it does not care about
# indexes, only values).
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "c"]}).set_index(
"col_02"
),
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_two_dataframes_with_object_columns_with_nans(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
df2=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
).equals(pd.DataFrame({"col_01": [True, True, True]}))
class TestAreDataFramesEqual:
def test_on_equal_dataframes_with_one_integer_column(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 3]}),
)[0]
def test_on_almost_equal_dataframes_but_differing_by_one_element(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 0]}),
)[0]
def test_on_almost_equal_dataframes_but_differing_by_type(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 3.0]}),
)[0]
def test_on_equal_dataframes_containing_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, np.nan]}),
df2=pd.DataFrame({"col_01": [1, 2, np.nan]}),
)[0]
def test_on_equal_dataframes_containing_only_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [np.nan, np.nan]}),
df2=pd.DataFrame({"col_01": [np.nan, np.nan]}),
)[0]
def test_on_equal_dataframes_both_empty(self):
assert dataframes.are_equal(df1=pd.DataFrame(), df2=pd.DataFrame())[0]
def test_on_equal_dataframes_with_various_types_of_columns(self):
assert dataframes.are_equal(
df1=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
df2=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
)[0]
def test_on_almost_equal_dataframes_but_columns_sorted_differently(self):
assert not dataframes.are_equal(
df1=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
df2=pd.DataFrame(
{
"col_02": [0.1, 0.2],
"col_01": [1, 2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
)[0]
def test_on_unequal_dataframes_with_all_columns_different(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": [0.1, 0.2]}),
df2=pd.DataFrame({"col_03": [0.1, 0.2], "col_04": [1, 2]}),
)[0]
def test_on_unequal_dataframes_with_some_common_columns(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": [0.1, 0.2]}),
df2=pd.DataFrame({"col_01": [1, 2], "col_03": [1, 2]}),
)[0]
def test_on_equal_dataframes_given_large_absolute_tolerance(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [10, 20]}),
df2=pd.DataFrame({"col_01": [11, 21]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
)[0]
def test_on_unequal_dataframes_given_large_absolute_tolerance(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [10, 20]}),
df2=pd.DataFrame({"col_01": [11, 21]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
)[0]
def test_on_equal_dataframes_given_large_relative_tolerance(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1]}),
df2=pd.DataFrame({"col_01": [2]}),
absolute_tolerance=1e-8,
relative_tolerance=0.5,
)[0]
def test_on_unequal_dataframes_given_large_relative_tolerance(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1]}),
df2=pd.DataFrame({"col_01": [2]}),
absolute_tolerance=1e-8,
relative_tolerance=0.49,
)[0]
def test_on_equal_dataframes_with_non_numeric_indexes(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
)[0]
def test_on_dataframes_of_equal_values_but_different_indexes(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "c"]}).set_index(
"col_02"
),
)[0]
def test_on_dataframes_with_object_columns_with_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
df2=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
)[0]
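# Note: every assertion above indexes the result with [0]; dataframes.are_equal
# presumably returns a tuple whose first element is the overall boolean verdict,
# with the remaining element(s) carrying the detailed comparison. This is
# inferred from the call sites here rather than from the library source.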
class TestGroupbyAggregate:
def test_default_aggregate_single_groupby_column_as_string(self):
df_in = pd.DataFrame(
{
"year": [2001, 2003, 2003, 2003, 2002, 2002],
"value_01": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [1, 11, 9],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
"year",
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_single_groupby_column_as_list(self):
df_in = pd.DataFrame(
{
"year": [2001, 2003, 2003, 2003, 2002, 2002],
"value_01": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [1, 11, 9],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_some_nans_ignored(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_some_nans_ignored_different_types(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": ["a", "b", "c", "d", "e", "f"],
"value_03": [True, False, False, True, True, False],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": ["a", "bc", "def"],
"value_03": [1, 0, 2],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_some_nans_ignored_different_types_and_more_nans(
self,
):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, True, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": [0, "b", "def"],
"value_03": [0, 0, 2],
}
).set_index("year")
df_out["value_03"] = df_out["value_03"].astype(object)
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_num_allowed_nans_zero(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, True, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, np.nan, 15.0],
"value_02": [np.nan, np.nan, "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=0,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_num_allowed_nans_one(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": [0, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=1,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_num_allowed_nans_two(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": [0, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, 1], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=2,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_num_allowed_nans_the_length_of_the_dataframe(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2004, 2004, 2004, 2004],
"value_01": [np.nan, 2, np.nan, 4, 5, 6, 7],
"value_02": [np.nan, "b", np.nan, "d", "e", "f", "g"],
"value_03": [np.nan, False, False, True, np.nan, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2004],
"value_01": [0.0, 2.0, 22.0],
"value_02": [0, "b", "defg"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, 1], index=[2001, 2002, 2004], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=len(df_in),
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_zero(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, True, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, np.nan, 15.0],
"value_02": [np.nan, np.nan, "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=0,
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_half(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, 2.0, 15.0],
"value_02": [np.nan, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=0.5,
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_two_thirds(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, 2.0, 15.0],
"value_02": [np.nan, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, 1], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=0.67,
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_one(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003, 2004, 2004, 2004, 2004],
"value_01": [np.nan, 2, np.nan, 4, 5, 6, 7, np.nan, np.nan, np.nan],
"value_02": [np.nan, "b", np.nan, "d", "e", "f", "g", "h", "i", "j"],
"value_03": [
np.nan,
False,
False,
True,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
True,
],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003, 2004],
"value_01": [0, 2.0, 15.0, 7],
"value_02": [0, "b", "def", "ghij"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, 1, 1], index=[2001, 2002, 2003, 2004], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_both_num_allowed_nans_and_frac_allowed_nans(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003, 2004, 2004, 2004, 2004],
"value_01": [np.nan, 2, np.nan, 4, 5, 6, 7, np.nan, np.nan, np.nan],
"value_02": [np.nan, "b", np.nan, "d", "e", "f", "g", "h", "i", "j"],
"value_03": [
np.nan,
False,
False,
True,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
True,
],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003, 2004],
"value_01": [np.nan, 2.0, 15.0, np.nan],
"value_02": [np.nan, "b", "def", "ghij"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan, np.nan], index=[2001, 2002, 2003, 2004], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=2,
frac_allowed_nans=0.5,
),
df2=df_out,
)[0]
def test_default_aggregate_with_two_groupby_columns(self):
df_in = pd.DataFrame(
{
"country": [
"country_a",
"country_a",
"country_a",
"country_b",
"country_b",
"country_c",
],
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"country": ["country_a", "country_a", "country_b", "country_c"],
"year": [2001, 2002, 2003, 2003],
"value_01": [1, 5, 9, 6],
}
).set_index(["country", "year"])
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["country", "year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_custom_aggregate(self):
aggregations = {"value_01": "sum", "value_02": "mean"}
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [1, 2, 3, 4, 5, np.nan],
"value_02": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [1.0, 5.0, np.nan],
"value_02": [1, 2.5, 7.5],
}
).set_index("year")
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=aggregations,
num_allowed_nans=0,
frac_allowed_nans=None,
),
df2=df_out,
        )[0]
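# Note on the defaults exercised above (inferred from the expected outputs, not
# from the library source): with aggregations=None, groupby_agg appears to sum
# every column -- numeric columns add up, string columns concatenate, and boolean
# columns sum to integer counts -- while num_allowed_nans / frac_allowed_nans
# replace a group's aggregate with NaN once its NaN count or fraction exceeds
# the allowance.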
class TestMultiMerge:
df1 = pd.DataFrame({"col_01": ["aa", "ab", "ac"], "col_02": ["ba", "bb", "bc"]})
def test_merge_identical_dataframes(self):
df1 = self.df1.copy()
df2 = self.df1.copy()
df3 = self.df1.copy()
assert dataframes.multi_merge(
[df1, df2, df3], how="inner", on=["col_01", "col_02"]
).equals(df1)
def test_inner_join_with_non_overlapping_dataframes(self):
df1 = self.df1.copy()
df2 = pd.DataFrame({"col_01": ["ad", "ae"]})
df3 = pd.DataFrame({"col_01": ["af"], "col_03": ["ca"]})
# For some reason the order of columns changes on the second merge.
df_out = pd.DataFrame({"col_02": [], "col_01": [], "col_03": []}, dtype=str)
assert dataframes.are_equal(
df1=dataframes.multi_merge([df1, df2, df3], how="inner", on="col_01"),
df2=df_out,
        )[0]
def test_outer_join_with_non_overlapping_dataframes(self):
df1 = self.df1.copy()
df2 = pd.DataFrame({"col_01": ["ad"]})
df3 = pd.DataFrame({"col_01": ["ae"]})
df_out = pd.DataFrame(
{
"col_01": ["aa", "ab", "ac", "ad", "ae"],
"col_02": ["ba", "bb", "bc", np.nan, np.nan],
}
)
assert dataframes.are_equal(
df1=dataframes.multi_merge([df1, df2, df3], how="outer", on="col_01"),
df2=df_out,
)[0]
def test_left_join(self):
df1 = self.df1.copy()
df2 = pd.DataFrame(
{
"col_01": ["aa", "ab", "ad"],
"col_02": ["ba", "bB", "bc"],
"col_03": [1, 2, 3],
}
)
# df_12 = pd.DataFrame({'col_01': ['aa', 'ab', 'ac'], 'col_02': ['ba', 'bb', 'bc'],
# 'col_03': [1, np.nan, np.nan]})
df3 = pd.DataFrame({"col_01": [], "col_02": [], "col_04": []})
df_out = pd.DataFrame(
{
"col_01": ["aa", "ab", "ac"],
"col_02": ["ba", "bb", "bc"],
"col_03": [1, np.nan, np.nan],
"col_04": [np.nan, np.nan, np.nan],
}
)
assert dataframes.multi_merge(
[df1, df2, df3], how="left", on=["col_01", "col_02"]
).equals(df_out)
def test_right_join(self):
df1 = self.df1.copy()
df2 = pd.DataFrame(
{
"col_01": ["aa", "ab", "ad"],
"col_02": ["ba", "bB", "bc"],
"col_03": [1, 2, 3],
}
)
# df12 = pd.DataFrame({'col_01': ['aa', 'ab', 'ad'], 'col_02': ['ba', 'bB', 'bc'], 'col_03': [1, 2, 3]})
df3 = pd.DataFrame(
{"col_01": ["aa", "ae"], "col_02": ["ba", "be"], "col_04": [4, 5]}
)
df_out = pd.DataFrame(
{
"col_01": ["aa", "ae"],
"col_02": ["ba", "be"],
"col_03": [1, np.nan],
"col_04": [4, 5],
}
)
assert dataframes.multi_merge(
[df1, df2, df3], how="right", on=["col_01", "col_02"]
).equals(df_out)
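# A minimal sketch of what multi_merge presumably does (an illustrative
# assumption, not the library implementation): chain pandas merges over the
# list of dataframes from left to right, e.g.
#
#   from functools import reduce
#   def multi_merge_sketch(dfs, **kwargs):
#       return reduce(lambda left, right: pd.merge(left, right, **kwargs), dfs)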
class TestMapSeries:
mapping = {
"country_01": "Country 1",
"country_02": "Country 2",
}
def test_all_countries_mapped_and_all_mappings_used(self):
series_in = pd.Series(["country_01", "country_02"])
series_out = pd.Series(["Country 1", "Country 2"])
assert dataframes.map_series(series=series_in, mapping=self.mapping).equals(
series_out
)
def test_one_country_missing_in_mapping(self):
        series_in = pd.Series(["country_01", "country_02", "country_03"])
from eflow.utils.sys_utils import dict_to_json_file,json_file_to_dict
from eflow.utils.language_processing_utils import get_synonyms
from eflow._hidden.custom_exceptions import UnsatisfiedRequirments
from eflow._hidden.constants import BOOL_STRINGS
import copy
import numpy as np
import pandas as pd
from dateutil import parser
from IPython.display import display
from IPython.display import clear_output
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, eFlow"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "EricCacciavillani"
__email__ = "<EMAIL>"
class DataFrameTypes:
"""
    Separates the features based on their dtypes to better keep track of
    feature types and to help make type assertions.
"""
def __init__(self,
df=None,
target_feature=None,
ignore_nulls=False,
fix_numeric_features=False,
fix_string_features=False,
notebook_mode=False):
"""
Args:
df: pd.DataFrame
Pandas dataframe object.
target_feature: string
If the project is using a supervised learning approach we can
specify the target column. (Note: Not required)
ignore_nulls: bool
                If set to true, a temporary dataframe is created in which each
                feature has its nan values removed, to assert what data type the
                series object would be without nans existing inside it.
fix_numeric_features: bool
Will attempt to convert all numeric features to the most proper
numerical types.
fix_string_features: bool
Will attempt to convert all string features to ALL proper types.
notebook_mode: bool
Boolean value to determine if any notebook functions can be used here.
"""
# Init an empty dataframe
if df is None:
df = pd.DataFrame({})
        # Grab features based on their types
self.__bool_features = set(
df.select_dtypes(include=["bool"]).columns)
self.__string_features = set(
df.select_dtypes(include=["object"]).columns)
self.__categorical_features = set(
df.select_dtypes(include=["category"]).columns)
self.__integer_features = set(
df.select_dtypes(include=["int"]).columns)
self.__float_features = set(
df.select_dtypes(include=["float"]).columns)
self.__datetime_features = set(
df.select_dtypes(include=["datetime"]).columns)
        # Features whose values are entirely null.
        null_features = df.isnull().sum()
        self.__null_only_features = set(null_features[null_features == df.shape[0]].index.to_list())
        del null_features
# Target feature for machine learning projects
self.__target_feature = None
# Feature's colors
self.__feature_value_color_dict = dict()
# Category/Label encoders
self.__label_encoder = dict()
self.__label_decoder = dict()
# Feature values representation
self.__feature_value_representation = dict()
# Dummy encoded feature dictionaries
self.__dummy_encoded_features = dict()
# Feature's labels and bins
self.__feature_labels_bins_dict = dict()
# Data type assertions without nulls
if ignore_nulls and df.isnull().values.any():
self.fix_nan_features(df)
# Attempt to init target column
if target_feature:
if target_feature in df.columns:
self.__target_feature = target_feature
else:
raise KeyError(f"The given target feature: \'{target_feature}\' does not exist!")
# Error checking; flag error; don't disrupt runtime
features_not_captured = set(df.columns)
all_features = (self.__float_features | self.__integer_features) | \
self.__string_features | \
self.__bool_features | \
self.__datetime_features | \
self.__categorical_features | \
self.__null_only_features
for col_feature in all_features:
features_not_captured.remove(col_feature)
if features_not_captured:
print("ERROR UNKNOWN FEATURE(S) TYPE(S) FOUND!\n{0}".format(
features_not_captured))
if fix_string_features:
self.fix_string_features(df,
notebook_mode)
if fix_numeric_features:
self.fix_numeric_features(df,
notebook_mode)
# --- Getters ---
def numerical_features(self,
exclude_target=False):
"""
Gets all numerical features chosen by the object.
Args:
exclude_target: bool
                If the target feature is numerical (int/float/bool); then it will be ignored
when passing back the set.
Returns:
Returns a set of all numerical features chosen by the object.
"""
tmp_set = self.__float_features | self.__integer_features | self.__bool_features
if exclude_target:
# Target feature never init
            if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return tmp_set
def non_numerical_features(self,
exclude_target=False):
"""
Gets all non-numerical features chosen by the object.
Args:
exclude_target: bool
                If the target feature is numerical (int/float/bool); then it will be ignored
when passing back the set.
Returns:
Returns a set of all non numerical features chosen by the object.
"""
tmp_set = self.all_features() ^ self.numerical_features()
if exclude_target:
# Target feature never init
            if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return tmp_set
def continuous_numerical_features(self,
exclude_target=False):
"""
Gets all numerical features that are continuous (int/float).
Args:
exclude_target: bool
                If the target feature is numerical (int/float); then it will be ignored
when passing back the set.
Returns:
Returns a set of all numerical features chosen by the object.
"""
tmp_set = self.__float_features | self.__integer_features
if exclude_target:
# Target feature never init
            if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return tmp_set
def non_continuous_numerical_features(self,
exclude_target=False):
"""
Gets all numerical features that are not continuous (bool)
Args:
exclude_target: bool
If the target feature is a bool; then it will be ignored
when passing back the set.
Returns:
Returns a set of all numerical features chosen by the object.
"""
tmp_set = self.__bool_features
if exclude_target:
# Target feature never init
            if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return tmp_set
def continuous_features(self,
exclude_target=False):
"""
        Gets all continuous features (int/float/datetime) chosen by the object.
        Args:
            exclude_target: bool
                If the target feature is continuous (int/float/datetime); then it will be ignored
                when passing back the set.
        Returns:
            Returns a set of all continuous features chosen by the object.
"""
tmp_set = self.__float_features | self.__integer_features | self.__datetime_features
if exclude_target:
# Target feature never init
            if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return tmp_set
def non_continuous_features(self,
exclude_target=False):
"""
        Gets all non-continuous features chosen by the object.
        Args:
            exclude_target: bool
                If the target feature is non-continuous; then it will be ignored
                when passing back the set.
        Returns:
            Returns a set of all non-continuous features chosen by the object.
"""
tmp_set = self.all_features() ^ self.continuous_features()
if exclude_target:
# Target feature never init
            if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return tmp_set
def integer_features(self,
exclude_target=False):
"""
All integer features chosen by df_features.
Args:
exclude_target: bool
If the target feature is an integer; then it will be ignored
when passing back the set.
Returns:
Returns a set of all integer features chosen by the object.
"""
if exclude_target:
tmp_set = copy.deepcopy(self.__integer_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__integer_features)
def float_features(self,
exclude_target=False):
"""
All float features chosen by df_features.
Args:
exclude_target: bool
                If the target feature is a float; then it will be ignored
when passing back the set.
Returns:
Returns a set of all float features chosen by the object.
"""
if exclude_target:
tmp_set = copy.deepcopy(self.__float_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__float_features)
def categorical_features(self,
exclude_target=False):
"""
All categorical features chosen by df_features.
Args:
exclude_target: bool
                If the target feature is categorical; then it will be ignored
when passing back the set.
Returns:
Returns a set of all categorical features chosen by the object.
"""
if exclude_target:
tmp_set = copy.deepcopy(self.__categorical_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__categorical_features)
def string_features(self,
exclude_target=False):
"""
All string features chosen by df_features.
Args:
exclude_target: bool
                If the target feature is a string; then it will be ignored
when passing back the set.
Returns:
Returns a set of all string features chosen by the object.
"""
if exclude_target:
tmp_set = copy.deepcopy(self.__string_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__string_features)
def bool_features(self,
exclude_target=False):
"""
All bool features chosen by df_features.
Args:
exclude_target: bool
                If the target feature is a bool; then it will be ignored
when passing back the set.
Returns:
Returns a set of all bool features chosen by the object.
"""
if exclude_target:
tmp_set = copy.deepcopy(self.__bool_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__bool_features)
def datetime_features(self,
exclude_target=False):
"""
All datetime features chosen by df_features.
Args:
exclude_target: bool
                If the target feature is a datetime; then it will be ignored
when passing back the set.
Returns:
Returns a set of all datetime features chosen by the object.
"""
if exclude_target:
tmp_set = copy.deepcopy(self.__datetime_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__datetime_features)
def null_only_features(self,
exclude_target=False):
if exclude_target:
tmp_set = copy.deepcopy(self.__null_only_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(self.__null_only_features)
def all_features(self,
exclude_target=False):
"""
Returns all features found in the dataset.
Args:
exclude_target: bool
                If set to true, the target feature will be ignored
when passing back the set.
Returns:
Returns all found features.
"""
all_features = (self.__float_features | self.__integer_features) | \
self.__string_features | \
self.__bool_features | \
self.__datetime_features | \
self.__categorical_features | \
self.__null_only_features
if exclude_target:
tmp_set = copy.deepcopy(all_features)
# Target feature never init
if not self.__target_feature:
raise KeyError("Target feature was never initialized")
# Check if target exist in set
if self.__target_feature in tmp_set:
tmp_set.remove(self.__target_feature)
return tmp_set
else:
return copy.deepcopy(all_features)
def get_feature_type(self,
feature_name):
"""
        Returns a feature's type as a string.
        Args:
            feature_name: str
                The given feature's name.
        Returns:
            Returns the feature's type as a string.
"""
if feature_name in self.__float_features:
return "float"
elif feature_name in self.__bool_features:
return "bool"
elif feature_name in self.__integer_features:
return "integer"
elif feature_name in self.__categorical_features:
return "categorical"
elif feature_name in self.__string_features:
return "string"
elif feature_name in self.__datetime_features:
return "datetime"
elif feature_name in self.__null_only_features:
return "null only"
else:
raise KeyError(f"Feature '{feature_name}' can't be found in the set!")
def target_feature(self):
"""
Gets the target feature.
Returns:
Returns the target feature.
"""
return copy.deepcopy(self.__target_feature)
def get_label_decoder(self):
"""
        Gets the dict decoder for the category to string relationship.
        Returns:
            Returns the decoder dict object.
"""
return copy.deepcopy(self.__label_decoder)
def get_label_encoder(self):
"""
Gets the dict encoder for string to category relationship.
Returns:
Returns the encoder dict object.
"""
return copy.deepcopy(self.__label_encoder)
def get_feature_colors(self,
feature_name):
"""
        Gets the value colors for the given feature.
        Args:
            feature_name: str
                The name of the given feature.
        Returns:
            Returns the feature's value-to-color dictionary; returns None if the feature name
            is not saved in the feature value color dict.
"""
if feature_name in self.__feature_value_color_dict.keys():
return copy.deepcopy(self.__feature_value_color_dict[feature_name])
else:
return None
def get_all_feature_colors(self):
"""
        Gets the entire dict of feature value colors.
        Returns:
            Returns a copy of the feature value colors.
"""
return copy.deepcopy(self.__feature_value_color_dict)
def get_feature_binning(self,
feature_name):
"""
        Gets the bins and labels for the given feature.
        Args:
            feature_name: str
                The name of the given feature.
        Returns:
            Returns the bins and labels for the given feature; returns None if
            the feature name is not saved in the feature labels/bins dict.
"""
if feature_name in self.__feature_labels_bins_dict:
return copy.deepcopy(self.__feature_labels_bins_dict[feature_name])
else:
return None
def get_all_feature_binning(self):
"""
        Gets the entire dict of feature bins and labels.
        Returns:
            Returns a copy of the labels and bins for all features.
"""
return copy.deepcopy(self.__feature_labels_bins_dict)
def get_feature_value_representation(self):
"""
        Gets the entire dict of feature value representations.
        Returns:
            Returns a copy of the feature value representations.
"""
return copy.deepcopy(self.__feature_value_representation)
def get_dummy_encoded_features(self):
"""
        Gets all dummy encoded relationships.
"""
return copy.deepcopy(self.__dummy_encoded_features)
# --- Setters and Appending ---
def set_target_feature(self,
feature_name):
"""
Sets the value.
Args:
feature_name:
Set the target feature.
"""
self.__target_feature = copy.deepcopy(feature_name)
def set_feature_to_bool(self,
feature_name):
"""
Moves feature to bool set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__bool_features.add(name)
def set_feature_to_integer(self,
feature_name):
"""
Moves feature to integer set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__integer_features.add(name)
def set_feature_to_float(self,
feature_name):
"""
Moves feature to float set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__float_features.add(name)
def set_feature_to_string(self,
feature_name):
"""
Moves feature to string set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__string_features.add(name)
def set_feature_to_categorical(self,
feature_name):
"""
Moves feature to categorical set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__categorical_features.add(name)
def set_feature_to_datetime(self,
feature_name):
"""
Moves feature to datetime set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name,str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__datetime_features.add(name)
def set_feature_to_null_only_features(self,
feature_name):
"""
Moves feature to only null series data feature set.
Args:
feature_name:
Feature name or a list of feature names to move to given set.
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.remove_feature(name)
self.__null_only_features.add(name)
def set_feature_binning(self,
feature_name,
bins,
labels):
if not isinstance(labels,list):
labels = list(labels)
if not isinstance(bins,list):
bins = list(bins)
self.__feature_labels_bins_dict[feature_name] = dict()
self.__feature_labels_bins_dict[feature_name]["bins"] = bins
self.__feature_labels_bins_dict[feature_name]["labels"] = labels
def add_new_bool_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set bool
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name,str):
feature_name = [feature_name]
for name in feature_name:
self.__bool_features.add(name)
def add_new_string_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set string
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.__string_features.add(name)
def add_new_integer_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set integer
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.__integer_features.add(name)
def add_new_float_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set float
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.__float_features.add(name)
def add_new_categorical_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set categorical
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.__categorical_features.add(name)
def add_new_null_only_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set null only features
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.__null_only_features.add(name)
def add_new_datetime_feature(self,
feature_name):
"""
Adds a new feature/feature(s) to the feature set datetime
Args:
feature_name: str
Name of the new feature
"""
if isinstance(feature_name, str):
feature_name = [feature_name]
for name in feature_name:
self.__datetime_features.add(name)
def set_feature_colors(self,
feature_value_color_dict):
"""
Passing in a dictionary of feature names to value to hex color to
save to this object. Error checks the dict for proper values.
Args:
feature_value_color_dict:
Dictionary of feature names to value to hex color.
Ex: feature_value_color_dict["Sex"]["Male"] = "#ffffff"
"""
# -----
for feature_name, color_dict in feature_value_color_dict.items():
if feature_name not in self.all_features():
raise UnsatisfiedRequirments(f"The feature name '{feature_name}' "
+ "was not found in any of the past"
" feature type sets!")
if isinstance(feature_name,str):
# -----
if isinstance(color_dict,dict):
for feature_value, hex_color in color_dict.items():
if not isinstance(hex_color, str):
raise UnsatisfiedRequirments(f"The feature value must be a string; not a {type(hex_color)}")
self.__feature_value_color_dict[feature_name] = color_dict
# -----
# elif isinstance(color_dict, str):
# try:
# sns.color_palette(color_dict)
# except:
# raise ValueError(f"The value {color_dict} is not a proper seaborn color template!")
#
# self.__feature_value_color_dict[feature_name] = color_dict
# -----
else:
raise UnsatisfiedRequirments("Expected to extract out a "
+ f"dict from the feature '{feature_name}' "
+ "values with accoiated color hex "
"values. Instead was found to"
+ f" be a {type(color_dict)}.")
else:
raise UnsatisfiedRequirments(f"Expect the feature name to be a "
+ f"string instead was found to be {type(feature_name)}")
def set_feature_value_representation(self,
feature_value_representation):
for feature_name in feature_value_representation:
if feature_name not in self.__string_features:
raise UnsatisfiedRequirments(f"Feature value assertions must be of type string.")
if feature_name not in self.all_features():
raise UnsatisfiedRequirments(f"'{feature_name}' doesn't exist in any features.")
self.__feature_value_representation = copy.deepcopy(feature_value_representation)
def set_feature_to_dummy_encoded(self,
feature_name,
dummy_encoded_list):
self.__dummy_encoded_features[feature_name] = dummy_encoded_list
for bool_feature in dummy_encoded_list:
self.__bool_features.add(bool_feature)
# --- Functions ---
def feature_types_dict(self):
feature_types = dict()
# -----
for feature_name in self.__string_features:
feature_types[feature_name] = "string"
for feature_name in self.__bool_features:
feature_types[feature_name] = "bool"
for feature_name in self.__integer_features:
feature_types[feature_name] = "integer"
for feature_name in self.__float_features:
feature_types[feature_name] = "float"
for feature_name in self.__datetime_features:
feature_types[feature_name] = "datetime"
for feature_name in self.__categorical_features:
feature_types[feature_name] = "categorical"
for feature_name in self.__null_only_features:
feature_types[feature_name] = "null_only"
return feature_types
def feature_types_dataframe(self):
features = list()
feature_types = list()
# -----
for feature_name in self.__string_features:
features.append(feature_name)
feature_types.append("string")
for feature_name in self.__bool_features:
features.append(feature_name)
feature_types.append("bool")
for feature_name in self.__integer_features:
features.append(feature_name)
feature_types.append("integer")
for feature_name in self.__float_features:
features.append(feature_name)
feature_types.append("float")
for feature_name in self.__datetime_features:
features.append(feature_name)
feature_types.append("datetime")
for feature_name in self.__categorical_features:
features.append(feature_name)
feature_types.append("category")
for feature_name in self.__null_only_features:
features.append(feature_name)
feature_types.append("null only")
        dtypes_df = pd.DataFrame({'Data Types': feature_types})
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillCLimbingFilter:
def test1_high_beta(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_generator_data_low_beta(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
input_df = specific_relation_generator(
df, columns=['link'], hierarchy_relation='http://www.w3.org/2004/02/skos/core#broader')
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test2_expected.csv")
output_df = hill_climbing_filter(input_df, 'link_in_boolean_http://dbpedia.org/resource/Category:Prefectures_in_France', beta=0.05, k=3)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_expected.csv")
output_df = hill_climbing_filter(input_df, 'class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_callable_function(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
def fake_metric(df, class_col, param=5):
return 1/((df.sum(axis=1)*class_col).sum()/param)
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test4_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', metric=fake_metric, G= input_DG, param=6)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test5_no_graph(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
with pytest.raises(RuntimeError) as excinfo:
_ = hill_climbing_filter(input_df, 'class', beta=0.5, k=2)
assert "df.attrs['hierarchy]" in str(excinfo.value)
class TestHierarchyBasedFilter():
def test1_no_pruning_info_gain_with_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test\data\feature_selection\hierarchy_based_test1_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="info_gain", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_no_pruning_correlation(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test\data\feature_selection\hierarchy_based_test2_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="correlation", pruning=False)
| pd.testing.assert_frame_equal(output_df, expected_df, check_like=True) | pandas.testing.assert_frame_equal |
from os import path
from app.api import fill_missing_dates
from app.api.gsheets import csv_url_for_sheets_url, save_to_sheet
import pandas as pd
def get_all_state_urls():
# TODO: move this out into something like config.py so it's not buried here
url_link = 'https://docs.google.com/spreadsheets/d/1kBL149bp8PWd_NMFm8Gxj-jXToSNEU9YNgQs0o9tREs/gviz/tq?tqx=out:csv&sheet=State_links'
url_df = pd.read_csv(url_link)
return url_df
def get_entry_url(state, url_df):
return url_df.loc[url_df.State == state].iloc[0].Entry
def get_final_url(state, url_df):
return url_df.loc[url_df.State == state].iloc[0].Final
# Using the standard facility sheet organization, creates a column name map for corresponding column
# names, cumulative -> current outbreak metric columns.
def make_matching_column_name_map(df):
num_numeric_cols = 12 # number of metrics
first_metric_col = 14 # position of 1st metric, "Cumulative Resident Positives"
col_map = {}
for i in range(num_numeric_cols):
cumulative_col = df.columns[first_metric_col+i]
outbreak_col = df.columns[first_metric_col+i+num_numeric_cols]
col_map[cumulative_col] = outbreak_col
return col_map
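# Illustrative note: with first_metric_col=14 and num_numeric_cols=12, the map
# pairs df.columns[14 + i] (a cumulative metric) with df.columns[26 + i] (the
# same metric for the current outbreak), for i in 0..11.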
# Uppercases county/city/facility/outbreak status entries, for easier comparison. Modifies in place
def standardize_data(df):
df[['County', 'City', 'Facility', 'Outbrk_Status', 'State_Facility_Type']] = \
df[['County', 'City', 'Facility', 'Outbrk_Status', 'State_Facility_Type']].fillna(value='')
for colname in ['County', 'City', 'Facility', 'Outbrk_Status', 'State_Facility_Type']:
df[colname] = df[colname].str.upper().str.strip()
# drop any rows with empty dates
    df.drop(df[pd.isnull(df['Date'])].index, inplace=True)
import time
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.preprocessing import scale
from sklearn.decomposition import pca
import fancyimpute
from sklearn.preprocessing import StandardScaler
import xgbfir
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
import config
import work_data
import models
import os
def main():
np.random.seed(42)
logger = config.config_logger(__name__, 10)
t0 = time.time()
train_client_path = './data/raw/csv/train_clientes.csv'
train_reque_path = './data/raw/csv/train_requerimientos.csv'
test_client_path = './data/raw/csv/test_clientes.csv'
test_reque_path = './data/raw/csv/test_requerimientos.csv'
output_path = './output/'
do_merge = False
write_impute_test = False
write_output = False
add_variables = False
version = 6
logger.info('Beginning execution')
logger.info('Load dataframes')
test_client = pd.read_csv(test_client_path, header=0)
test_reque = pd.read_csv(test_reque_path, header=0)
    main_client = pd.read_csv(train_client_path, header=0)
import os
from pathlib import Path
import joblib
import pandas as pd
import numpy as np
from multiprocessing import Pool
from collections import defaultdict
import functools
import re
import sys
sys.path.insert(0, './code')
from utils import DataLogger # noqa: E402
class DataNotFoundException(Exception):
pass
def get_time_split(df):
df_12 = df[df['dt'] <= 12]
df_16 = df[(df['dt'] > 12) & (df['dt'] <= 16)]
# df_20 = df[(df['dt'] > 16) & (df['dt'] <= 19)]
# df_21 = df[(df['dt'] > 17) & (df['dt'] <= 20)]
df_22 = df[(df['dt'] > 18) & (df['dt'] <= 21)]
df_23 = df[(df['dt'] > 19) & (df['dt'] <= 22)]
# df_24 = df[(df['dt'] > 20) & (df['dt'] <= 23)]
# df_25 = df[(df['dt'] > 21) & (df['dt'] <= 24)]
r_dict = {
"one_to_twelve": df_12,
"twelve_to_sixteen": df_16,
# "prev_three_months_20": df_20,
# "prev_three_months_21": df_21,
"prev_three_months_22": df_22,
"prev_three_months_23": df_23,
# "prev_three_months_24": df_24,
# "prev_three_months_25": df_25
}
return r_dict
def get_merge_dict():
merge_dict = {
# 20: ["one_to_twelve", "twelve_to_sixteen", "prev_three_months_20"],
# 21: ["one_to_twelve", "twelve_to_sixteen", "prev_three_months_21"],
22: ["one_to_twelve", "twelve_to_sixteen", "prev_three_months_22"],
23: ["one_to_twelve", "twelve_to_sixteen", "prev_three_months_23"],
# 24: ["one_to_twelve", "twelve_to_sixteen", "prev_three_months_24"],
# 25: ["one_to_twelve", "twelve_to_sixteen", "prev_three_months_25"],
}
return merge_dict
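# Window semantics implied by get_time_split / get_merge_dict: features for
# prediction month dt are built from three slices of history -- months 1-12
# ("one_to_twelve"), months 13-16 ("twelve_to_sixteen"), and the three months
# just before dt (19-21 for dt=22, 20-22 for dt=23, the "prev_three_months_*"
# slices).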
def get_time_split_result(a_func):
@functools.wraps(a_func)
def wrapper(self, df):
r_dict = defaultdict(list)
df_dict = get_time_split(df)
use_dict = {key: a_func(self, df_dict[key]) for key in df_dict.keys()}
merge_dict = get_merge_dict()
for dt in merge_dict.keys():
vals_12 = use_dict[merge_dict[dt][0]]
vals_16 = use_dict[merge_dict[dt][1]]
vals_prevs = use_dict[merge_dict[dt][2]]
for val, val_12, val_16 in zip(vals_prevs, vals_12, vals_16):
name = val[0]
name_12 = "{}_12".format(name)
name_16 = "{}_16".format(name)
r_dict[name].append(val[1])
r_dict[name_12].append(val_12[1])
r_dict[name_16].append(val_16[1])
return r_dict
return wrapper
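# Usage note for the decorator above: a wrapped method returns a list of
# (name, value) pairs for a single time slice; the wrapper re-runs it on every
# slice and, for each prediction month in get_merge_dict, emits three entries
# per name -- e.g. get_how_many_tags (further below) contributes "how_many_tag",
# "how_many_tag_12" and "how_many_tag_16" keys to the returned dict.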
class DataLoader():
def __init__(self):
self.output_path = Path(os.path.abspath(os.getcwd())) / 'output'
self.input_path = Path(os.path.abspath(os.getcwd())) / 'input'
self.model_path = Path(os.path.abspath(os.getcwd())) / 'model'
def save_data(self, cls, data_name, message):
logger = DataLogger()
logger.save_data("Save data {} is generated from {}".format(
data_name, message))
joblib.dump(cls, self.output_path / data_name)
logger.save_data("{} is sucessfully saved".format(data_name))
def load_data(self, data_name, data_type='joblib', **kwargs):
if data_type == 'joblib':
data = joblib.load(self.input_path / data_name, **kwargs)
elif data_type == 'csv':
data = pd.read_csv(self.input_path / data_name, **kwargs)
return data
def load_result(self, data_name, data_type='joblib', **kwargs):
if data_type == 'joblib':
data = joblib.load(self.output_path / data_name, **kwargs)
elif data_type == 'csv':
data = pd.read_csv(self.output_path / data_name, **kwargs)
return data
class FeatLoader(DataLoader):
def __init__(self):
super(FeatLoader, self).__init__()
self.required_cate = ('2', '6', '10', '12', '13', '15', '18', '19',
'21', '22', '25', '26', '36', '37', '39', '48')
self.shop_cate = [str(i + 1) for i in range(48)] + ['other']
self.shop_amt = [
"shop_{}_amt".format(shop_tag) for shop_tag in self.shop_cate
]
self.shop_cnt = [
"shop_{}_cnt".format(shop_tag) for shop_tag in self.shop_cate
]
self.card_cate = [str(i + 1) for i in range(14)] + ['other']
self.card_amt = [
"card_{}_txn_amt".format(card_cate) for card_cate in self.card_cate
]
self.card_cnt = [
"card_{}_txn_cnt".format(card_cate) for card_cate in self.card_cate
]
self.count = 0
self.profile_cate = [
"masts",
"educd",
"trdtp",
"naty",
"poscd",
"cuorg",
"primary_card",
"slam",
"age",
"gender_code",
]
self.basic_info = [
'masts',
'educd',
'trdtp',
'naty',
'poscd',
'cuorg',
'primary_card',
'age',
'gender_code',
]
self.dts = [dt for dt in range(1, 25)]
def update_data(self, data):
self.data = data.copy()
class AmtFeatLoader(FeatLoader):
def __init__(self):
super(AmtFeatLoader, self).__init__()
self.get_feat_config()
def update_a_df(self, df):
result = {'chid': df['chid'].iloc[0]}
if self.count % 10000 == 0:
print(result)
self.count += 1
for feat_func in self.feat_config:
result.update(feat_func(df))
# result = pd.DataFrame(result)
return result
def get_feat_config(self):
self.feat_config = {self.get_amt_by_months}
def get_amt_by_months(self, df):
def get_shop_amt_cate(x):
dt, shop_tag = x
name = "shop_{}_amt_{}".format(shop_tag, dt)
return name
result = {}
for dt in range(1, 25):
for shop_amt_cate in self.shop_amt:
result.update({shop_amt_cate + '_{}'.format(dt): 0})
if df.empty:
return result
else:
df['shop_amt_cate'] = df[['dt',
'shop_tag']].apply(get_shop_amt_cate,
axis=1)
amt_dict = {
shop_amt_cate: amt
for amt, shop_amt_cate in zip(df['txn_amt'],
df['shop_amt_cate'])
}
result.update(amt_dict)
return result
def fit(self):
if not hasattr(self, 'data'):
raise DataNotFoundException("Data not found! Please update data")
df_group = self.data.groupby(['chid'])
df_group = [df[1] for df in df_group]
pool = Pool(8, maxtasksperchild=1000)
feat_group = pool.map(self.update_a_df, df_group)
pool.close()
self.feats = pd.DataFrame(feat_group)
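# A minimal, assumed usage sketch for AmtFeatLoader (illustrative only; the
# input dataframe name is hypothetical):
#
#   loader = AmtFeatLoader()
#   loader.update_data(txn_df)  # needs chid / dt / shop_tag / txn_amt columns
#   loader.fit()                # loader.feats: one row per chid, shop_{tag}_amt_{dt} columns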
class ProfileFeatLoader(FeatLoader):
def __init__(self):
super(ProfileFeatLoader, self).__init__()
self.get_feat_config()
self.card_cnt_pct = [
"card_{}_cnt_pct".format(cate) for cate in self.card_cate
]
self.card_avg_amt = [
"card_{}_avg_amt".format(cate) for cate in self.card_cate
]
def fit(self):
# run 500000 times loop
if not hasattr(self, 'data'):
raise DataNotFoundException("Data not found! Please update data")
self.data = self.get_early_calculation(self.data)
df_group = self.data.groupby(['chid'])
df_group = [df[1] for df in df_group]
pool = Pool(8, maxtasksperchild=1000)
feat_group = pool.map(self.update_a_df, df_group)
pool.close()
self.feats = pd.concat(feat_group)
def get_early_calculation(self, df):
df['avg_amt'] = df['txn_amt'] / df['txn_cnt']
df['offline_cnt_pct'] = df['txn_cnt'] / (df['domestic_offline_cnt'] +
df['overseas_offline_cnt'])
df['online_cnt_pct'] = df['txn_cnt'] / (df['domestic_online_cnt'] +
df['overseas_online_cnt'])
df['domestic_cnt_pct'] = df['txn_cnt'] / (df['domestic_offline_cnt'] +
df['domestic_online_cnt'])
df['overseas_cnt_pct'] = df['txn_cnt'] / (df['overseas_offline_cnt'] +
df['overseas_online_cnt'])
# generate card amt
for cate in self.card_cate:
df['card_{}_txn_amt'.format(
cate)] = df['card_{}_txn_amt_pct'.format(cate)] * df['txn_amt']
# generate card cnt ratio
for cate in self.card_cate:
new_key = "card_{}_cnt_pct".format(cate)
cnt_key = "card_{}_txn_cnt".format(cate)
df[new_key] = df[cnt_key] / df['txn_cnt']
# generate the avg for card cate
for cate in self.card_cate:
new_key = "card_{}_avg_amt".format(cate)
amt_key = "card_{}_txn_amt".format(cate)
cnt_key = "card_{}_txn_cnt".format(cate)
df[new_key] = df[amt_key] / df[cnt_key]
return df
def update_a_df(self, df):
# df: user history records
result = {
'dt': [22, 23],
'chid': [df['chid'].iloc[0]] * 2,
}
if self.count % 10000 == 0:
print(result)
self.count += 1
for feat_func in self.feat_config:
result.update(feat_func(df))
result = pd.DataFrame(result)
return result
def get_feat_config(self):
self.feat_config = {
# 最開始使用信用卡時間 #首刷月
# 離首刷月多久
self.get_start_use_dt,
# # 消費多少種類
# # 消費多少重要種類
self.get_how_many_tags,
# # basic info
self.get_basic_profile,
}
def get_basic_profile(self, df):
if df.empty:
r_dict = {
profile_cate: [-1] * 3
for profile_cate in self.profile_cate
}
else:
r_dict = {
profile_cate: df[profile_cate].iloc[0]
for profile_cate in self.profile_cate
}
return r_dict
@get_time_split_result
def get_how_many_tags(self, df):
if df.empty:
r_list = [("how_many_tag", -1), ("how_many_tag_imp", -1)]
else:
how_many_tag = len(df['shop_tag'].unique())
how_many_tag_imp = len(df[df['shop_tag'].isin(
self.required_cate)]['shop_tag'].unique())
r_list = [("how_many_tag", how_many_tag),
("how_many_tag_imp", how_many_tag_imp)]
return r_list
def get_start_use_dt(self, df):
if df.empty:
r_dict = {"start_dt": [-1] * 2, "how_long_dt": [-1] * 2}
else:
start_dt = df['dt'].iloc[0]
how_long_dt = np.array([24, 25]) - np.array([start_dt] * 2)
r_dict = {
"start_dt": [start_dt] * 2,
"how_long_dt": list(how_long_dt)
}
return r_dict
class CntFeatLoader(FeatLoader):
def __init__(self):
super(CntFeatLoader, self).__init__()
self.get_feat_config()
def get_feat_config(self):
self.feat_config = {self.get_cnt_by_months}
def get_cnt_by_months(self, df):
def get_shop_cnt_cate(x):
dt, shop_tag = x
name = "shop_{}_cnt_{}".format(shop_tag, dt)
return name
result = {}
for dt in range(1, 25):
for shop_cnt_cate in self.shop_cnt:
result.update({shop_cnt_cate + '_{}'.format(dt): 0})
if df.empty:
return result
else:
df['shop_cnt_cate'] = df[['dt',
'shop_tag']].apply(get_shop_cnt_cate,
axis=1)
cnt_dict = {
shop_cnt_cate: cnt
for cnt, shop_cnt_cate in zip(df['txn_cnt'],
df['shop_cnt_cate'])
}
result.update(cnt_dict)
return result
def update_a_df(self, df):
result = {'chid': df['chid'].iloc[0]}
if self.count % 10000 == 0:
print(result)
self.count += 1
for feat_func in self.feat_config:
result.update(feat_func(df))
return result
def fit(self):
if not hasattr(self, 'data'):
raise DataNotFoundException("Data not found! Please update data")
df_group = self.data.groupby(['chid'])
df_group = [df[1] for df in df_group]
pool = Pool(8, maxtasksperchild=1000)
feat_group = pool.map(self.update_a_df, df_group)
pool.close()
self.feats = pd.DataFrame(feat_group)
class RankTopFeatLoader(FeatLoader):
def __init__(self):
super(RankTopFeatLoader, self).__init__()
self.get_feat_config()
self.shop_cate_map = {
i: a_shop_cate
for i, a_shop_cate in enumerate(self.shop_cate)
}
self.imp_cate_map = {
i: imp_cate
for i, imp_cate in enumerate(self.required_cate)
}
def update_a_df(self, df):
print(df.columns[0])
result = []
for feat_func in self.feat_config:
result.append(feat_func(df))
tops = pd.concat(result, axis=1)
return tops
def get_feat_config(self):
self.feat_config = [
self.get_tops_by_months,
self.get_imp_tops_by_months,
]
def get_tops_by_months(self, df):
dt = df.columns[0].split('_')[-1]
top3 = df.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
top3.columns = [
'top3_{}'.format(dt), 'top2_{}'.format(dt), 'top1_{}'.format(dt)
]
for col in top3.columns:
top3[col] = top3[col].map(self.shop_cate_map)
top3['how_many_cate_{}'.format(dt)] = df.gt(0).sum(axis=1)
top3.loc[
top3['how_many_cate_{}'.format(dt)] == 0,
['top3_{}'.format(dt), 'top2_{}'.format(dt), 'top1_{}'.
format(dt)]] = "-1"
top3.loc[top3['how_many_cate_{}'.format(dt)] == 1,
['top3_{}'.format(dt), 'top2_{}'.format(dt)]] = "-1"
top3.loc[top3['how_many_cate_{}'.format(dt)] == 2,
['top3_{}'.format(dt)]] = "-1"
return top3
def get_imp_tops_by_months(self, df):
dt = df.columns[0].split('_')[-1]
reg = r"shop_(\d+_|other_)(.+)_\d+"
fetch_type = re.findall(reg, df.columns[0])[0][1]
imp_cols = [
"shop_{}_{}_{}".format(a_cate, fetch_type, dt)
for a_cate in self.required_cate
]
imp_df = df[imp_cols].copy()
imp_top3 = imp_df.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
imp_top3.columns = [
'imp_top3_{}'.format(dt), 'imp_top2_{}'.format(dt),
'imp_top1_{}'.format(dt)
]
for col in imp_top3.columns:
imp_top3[col] = imp_top3[col].map(self.imp_cate_map)
imp_top3['how_many_cate_imp_{}'.format(dt)] = imp_df.gt(0).sum(axis=1)
imp_top3.loc[imp_top3["how_many_cate_imp_{}".format(dt)] == 0, [
"imp_top3_{}".format(dt), "imp_top2_{}".format(dt), "imp_top1_{}".
format(dt)
]] = "-1"
imp_top3.loc[
imp_top3["how_many_cate_imp_{}".format(dt)] == 1,
["imp_top3_{}".format(dt), "imp_top2_{}".format(dt)]] = "-1"
imp_top3.loc[imp_top3['how_many_cate_imp_{}'.format(dt)] == 2,
['imp_top3_{}'.format(dt)]] = "-1"
return imp_top3
def fit(self):
if not hasattr(self, 'data'):
raise DataNotFoundException("Data not found! Please update data")
feats = [self.data[['chid']].reset_index(drop=True)]
df = self.data.drop("chid", axis=1).reset_index(drop=True)
cols = list(df.columns)
cols_group = [cols[dt * 49:(1 + dt) * 49] for dt in range(24)]
df_group = [df[col_seg] for col_seg in cols_group]
pool = Pool(4, maxtasksperchild=1000)
feat_group = pool.map(self.update_a_df, df_group)
pool.close()
        self.feats = pd.concat(feats + feat_group, axis=1)
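# Minimal numeric sketch of the top-3 ranking idea used in get_tops_by_months above:
# np.argsort over each row returns column positions ordered from smallest to largest
# amount, so the last three positions identify the top-3 categories, which are then
# mapped back to category names. The toy frame and category map below are hypothetical;
# only the mechanics mirror the method above (the "-1" masking for sparse rows is omitted).
def _demo_top3_ranking():
    toy = pd.DataFrame({'shop_2_amt_1': [10, 0],
                        'shop_6_amt_1': [30, 5],
                        'shop_10_amt_1': [20, 0]})
    cate_map = {0: '2', 1: '6', 2: '10'}          # column position -> shop tag
    top3 = toy.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
    top3.columns = ['top3_1', 'top2_1', 'top1_1']
    return top3.apply(lambda col: col.map(cate_map))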
import contextlib
import json
import gzip
import io
import logging
import os.path
import pickle
import random
import shutil
import sys
import tempfile
import traceback
import unittest
import pandas
COMMON_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'common-primitives')
# NOTE: This insertion should appear before any code attempting to resolve or load primitives,
# so the git submodule version of `common-primitives` is looked at first.
sys.path.insert(0, COMMON_PRIMITIVES_DIR)
TEST_PRIMITIVES_DIR = os.path.join(os.path.dirname(__file__), 'data', 'primitives')
sys.path.insert(0, TEST_PRIMITIVES_DIR)
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.no_split import NoSplitDatasetSplitPrimitive
from common_primitives.random_forest import RandomForestClassifierPrimitive
from common_primitives.train_score_split import TrainScoreDatasetSplitPrimitive
from test_primitives.random_classifier import RandomClassifierPrimitive
from test_primitives.fake_score import FakeScorePrimitive
from d3m import cli, index, runtime, utils
from d3m.container import dataset as dataset_module
from d3m.contrib.primitives.compute_scores import ComputeScoresPrimitive
from d3m.metadata import base as metadata_base, pipeline as pipeline_module, pipeline_run as pipeline_run_module, problem as problem_module
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
PROBLEM_DIR = os.path.join(TEST_DATA_DIR, 'problems')
DATASET_DIR = os.path.join(TEST_DATA_DIR, 'datasets')
PIPELINE_DIR = os.path.join(TEST_DATA_DIR, 'pipelines')
class TestCLIRuntime(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
@classmethod
def setUpClass(cls):
to_register = {
'd3m.primitives.data_transformation.dataset_to_dataframe.Common': DatasetToDataFramePrimitive,
'd3m.primitives.classification.random_forest.Common': RandomForestClassifierPrimitive,
'd3m.primitives.classification.random_classifier.Test': RandomClassifierPrimitive,
'd3m.primitives.data_transformation.column_parser.Common': ColumnParserPrimitive,
'd3m.primitives.data_transformation.construct_predictions.Common': ConstructPredictionsPrimitive,
'd3m.primitives.evaluation.no_split_dataset_split.Common': NoSplitDatasetSplitPrimitive,
'd3m.primitives.evaluation.compute_scores.Test': FakeScorePrimitive,
'd3m.primitives.evaluation.train_score_dataset_split.Common': TrainScoreDatasetSplitPrimitive,
# We do not have to load this primitive, but loading it here prevents the package from loading all primitives.
'd3m.primitives.evaluation.compute_scores.Core': ComputeScoresPrimitive,
}
# To hide any logging or stdout output.
with utils.silence():
for python_path, primitive in to_register.items():
index.register_primitive(python_path, primitive)
def _call_cli_runtime(self, arg):
logger = logging.getLogger('d3m.runtime')
with utils.silence():
with self.assertLogs(logger=logger) as cm:
# So that at least one message is logged.
logger.warning("Debugging.")
cli.main(arg)
# We skip our "debugging" message.
return cm.records[1:]
def _call_cli_runtime_without_fail(self, arg):
try:
return self._call_cli_runtime(arg)
except Exception as e:
self.fail(traceback.format_exc())
def _assert_valid_saved_pipeline_runs(self, pipeline_run_save_path):
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
try:
pipeline_run_module.validate_pipeline_run(pipeline_run_dict)
except Exception as e:
self.fail(traceback.format_exc())
def _validate_previous_pipeline_run_ids(self, pipeline_run_save_path):
ids = set()
prev_ids = set()
with open(pipeline_run_save_path, 'r') as f:
for pipeline_run_dict in list(utils.yaml_load_all(f)):
ids.add(pipeline_run_dict['id'])
if 'previous_pipeline_run' in pipeline_run_dict:
prev_ids.add(pipeline_run_dict['previous_pipeline_run']['id'])
self.assertTrue(
prev_ids.issubset(ids),
'Some previous pipeline run ids {} are not in the set of pipeline run ids {}'.format(prev_ids, ids)
)
def test_fit_multi_input(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
def test_fit_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--save',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'--output',
output_csv_path,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11225, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11225, outputs_path='output.csv')
def test_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-no-problem-pipeline')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--save',
fitted_pipeline_path,
]
self._call_cli_runtime_without_fail(arg)
arg = [
'',
'runtime',
'produce',
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--output',
output_csv_path,
'--fitted-pipeline',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-no-problem-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_fit_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
output_csv_path = os.path.join(self.test_dir, 'output.csv')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--output',
output_csv_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=11008, outputs_path='output.csv')
def test_nonstandard_fit_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--save',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'--not-standard-pipeline',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=10710, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--save',
fitted_pipeline_path,
'--not-standard-pipeline'
]
self._call_cli_runtime_without_fail(arg)
arg = [
'',
'runtime',
'produce',
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--fitted-pipeline',
fitted_pipeline_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json'
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_nonstandard_fit_produce_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'semi-standard-pipeline.json'),
'--expose-produced-outputs',
self.test_dir,
'--not-standard-pipeline',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'outputs.1/data.csv',
'outputs.1/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=12106, outputs_path='outputs.0/data.csv')
self._assert_nonstandard_output(outputs_name='outputs.1')
def test_fit_produce_multi_input(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json',
])
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self._assert_standard_output_metadata()
self._assert_prediction_sum(prediction_sum=11008, outputs_path='outputs.0/data.csv')
def test_fit_score(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-score',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--scores',
os.path.join(self.test_dir, 'scores.csv'),
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_fit_score_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-score',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'F1_MACRO',
'--metric',
'ACCURACY',
'--scores',
os.path.join(self.test_dir, 'scores.csv'),
'-O',
pipeline_run_save_path,
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(os.path.join(self.test_dir, 'scores.csv'))
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
@staticmethod
def _get_iris_dataset_path():
return os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json')
@staticmethod
def _get_iris_problem_path():
return os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json')
@staticmethod
def _get_random_forest_pipeline_path():
return os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml')
@staticmethod
def _get_no_split_data_pipeline_path():
return os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml')
@staticmethod
def _get_train_test_split_data_pipeline_path():
return os.path.join(PIPELINE_DIR, 'data-preparation-train-test-split.yml')
def _get_pipeline_run_save_path(self):
return os.path.join(self.test_dir, 'pipeline_run.yml')
def _get_predictions_path(self):
return os.path.join(self.test_dir, 'predictions.csv')
def _get_scores_path(self):
return os.path.join(self.test_dir, 'scores.csv')
def _get_pipeline_rerun_save_path(self):
return os.path.join(self.test_dir, 'pipeline_rerun.yml')
def _get_rescores_path(self):
return os.path.join(self.test_dir, 'rescores.csv')
def _fit_iris_random_forest(
self, *, predictions_path=None, fitted_pipeline_path=None, pipeline_run_save_path=None
):
if pipeline_run_save_path is None:
pipeline_run_save_path = self._get_pipeline_run_save_path()
arg = [
'',
'runtime',
'fit',
'--input',
self._get_iris_dataset_path(),
'--problem',
self._get_iris_problem_path(),
'--pipeline',
self._get_random_forest_pipeline_path(),
'-O',
pipeline_run_save_path
]
if predictions_path is not None:
arg.append('--output')
arg.append(predictions_path)
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def _fit_iris_random_classifier_without_problem(self, *, fitted_pipeline_path):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'-O',
pipeline_run_save_path
]
if fitted_pipeline_path is not None:
arg.append('--save')
arg.append(fitted_pipeline_path)
self._call_cli_runtime_without_fail(arg)
def test_fit(self):
pipeline_run_save_path = self._get_pipeline_run_save_path()
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
self._fit_iris_random_forest(
fitted_pipeline_path=fitted_pipeline_path, pipeline_run_save_path=pipeline_run_save_path
)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
self.assertTrue(os.path.isfile(pipeline_run_save_path))
def test_evaluate(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scores',
scores_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'-O',
pipeline_run_save_path
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0], ['F1_MACRO', 1.0, 1.0, 0, 0]])
def test_evaluate_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'ACCURACY',
'--scores',
scores_path,
'-O',
pipeline_run_save_path
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed', 'fold'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0, 0]])
def test_score(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scores',
scores_path,
'--metric',
'F1_MACRO',
'--metric',
'ACCURACY',
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['F1_MACRO', 1.0, 1.0, 0], ['ACCURACY', 1.0, 1.0, 0]])
def test_score_without_problem_without_metric(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_classifier_without_problem(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
'--scores',
scores_path,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_score_without_problem(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_classifier_without_problem(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--scoring-pipeline',
os.path.join(PIPELINE_DIR, 'fake_compute_score.yml'),
# this argument has no effect
'--metric',
'ACCURACY',
'--scores',
scores_path,
'-O',
pipeline_run_save_path,
]
logging_records = self._call_cli_runtime_without_fail(arg)
self.assertEqual(len(logging_records), 1)
self.assertEqual(logging_records[0].msg, "Not all provided hyper-parameters for the scoring pipeline %(pipeline_id)s were used: %(unused_params)s")
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized', 'randomSeed'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0, 0]])
def test_produce(self):
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'iris-pipeline')
self._fit_iris_random_forest(fitted_pipeline_path=fitted_pipeline_path)
self.assertTrue(os.path.isfile(fitted_pipeline_path))
arg = [
'',
'runtime',
'produce',
'--fitted-pipeline',
fitted_pipeline_path,
'--test-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
def test_score_predictions(self):
predictions_path = os.path.join(self.test_dir, 'predictions.csv')
self._fit_iris_random_forest(predictions_path=predictions_path)
self.assertTrue(os.path.isfile(predictions_path))
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'score-predictions',
'--score-input',
os.path.join(DATASET_DIR, 'iris_dataset_1/datasetDoc.json'),
'--problem',
os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'),
'--predictions',
predictions_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'--scores',
scores_path,
]
self._call_cli_runtime_without_fail(arg)
self.assertTrue(os.path.isfile(scores_path), 'scores were not generated')
dataframe = pandas.read_csv(scores_path)
self.assertEqual(list(dataframe.columns), ['metric', 'value', 'normalized'])
self.assertEqual(dataframe.values.tolist(), [['ACCURACY', 1.0, 1.0], ['F1_MACRO', 1.0, 1.0]])
def test_sklearn_dataset_fit_produce(self):
self._create_sklearn_iris_problem_doc()
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
arg = [
'',
'runtime',
'fit-produce',
'--input',
'sklearn://iris',
'--input',
'sklearn://iris',
'--problem',
os.path.join(self.test_dir, 'problemDoc.json'),
'--test-input',
'sklearn://iris',
'--test-input',
'sklearn://iris',
'--pipeline',
os.path.join(PIPELINE_DIR, 'multi-input-test.json'),
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self.assertEqual(utils.list_files(self.test_dir), [
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'problemDoc.json',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json'
])
self._assert_standard_output_metadata(prediction_type='numpy.int64')
self._assert_prediction_sum(prediction_sum=10648, outputs_path='outputs.0/data.csv')
def test_sklearn_dataset_fit_produce_without_problem(self):
output_csv_path = os.path.join(self.test_dir, 'output.csv')
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
fitted_pipeline_path = os.path.join(self.test_dir, 'fitted-pipeline')
arg = [
'',
'runtime',
'fit-produce',
'--input',
'sklearn://iris',
'--test-input',
'sklearn://iris',
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-classifier.yml'),
'--save',
fitted_pipeline_path,
'--output',
output_csv_path,
'--expose-produced-outputs',
self.test_dir,
'-O',
pipeline_run_save_path,
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
self.assertEqual(utils.list_files(self.test_dir), [
'fitted-pipeline',
'output.csv',
'outputs.0/data.csv',
'outputs.0/metadata.json',
'pipeline_run.yml',
'steps.0.produce/data.csv',
'steps.0.produce/metadata.json',
'steps.1.produce/data.csv',
'steps.1.produce/metadata.json',
'steps.2.produce/data.csv',
'steps.2.produce/metadata.json',
])
self._assert_standard_output_metadata(prediction_type='numpy.int64')
self._assert_prediction_sum(prediction_sum=10648, outputs_path='outputs.0/data.csv')
self._assert_prediction_sum(prediction_sum=10648, outputs_path='output.csv')
def _create_sklearn_iris_problem_doc(self):
with open(os.path.join(PROBLEM_DIR, 'iris_problem_1/problemDoc.json'), 'r', encoding='utf8') as problem_doc_file:
problem_doc = json.load(problem_doc_file)
problem_doc['inputs']['data'][0]['datasetID'] = 'sklearn://iris'
with open(os.path.join(self.test_dir, 'problemDoc.json'), 'x', encoding='utf8') as problem_doc_file:
json.dump(problem_doc, problem_doc_file)
def test_sklearn_dataset_evaluate(self):
self._create_sklearn_iris_problem_doc()
pipeline_run_save_path = os.path.join(self.test_dir, 'pipeline_run.yml')
scores_path = os.path.join(self.test_dir, 'scores.csv')
arg = [
'',
'runtime',
'evaluate',
'--input',
'sklearn://iris',
'--problem',
os.path.join(self.test_dir, 'problemDoc.json'),
'--pipeline',
os.path.join(PIPELINE_DIR, 'random-forest-classifier.yml'),
'--data-pipeline',
os.path.join(PIPELINE_DIR, 'data-preparation-no-split.yml'),
'--scores',
scores_path,
'--metric',
'ACCURACY',
'--metric',
'F1_MACRO',
'-O',
pipeline_run_save_path
]
self._call_cli_runtime_without_fail(arg)
self._assert_valid_saved_pipeline_runs(pipeline_run_save_path)
self._validate_previous_pipeline_run_ids(pipeline_run_save_path)
        dataframe = pandas.read_csv(scores_path)
import json
import math
import os
import random
import sys
import time
import warnings
from functools import reduce
from itertools import combinations, product
from operator import add
from typing import List, Sequence, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pretty_errors
import scipy.optimize as sco
import seaborn as sns
import statsmodels.api as sm
from pyecharts import options as opts
from pyecharts.charts import Bar, Grid, Kline, Line
from pyecharts.commons.utils import JsCode
from statsmodels import regression
from tqdm import tqdm, trange
sys.path.append(os.path.dirname(__file__) + os.sep + '../')
try:
from ..data.Stock import StockData
from ..log.log import hide, makedir, progress_bar, show, slog, sprint
except:
from data.Stock import StockData
from log.log import hide, makedir, progress_bar, show, slog, sprint
warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly
plt.rcParams['font.size'] = 13
class Markovitz(object):
'''
    Portfolio investment weights\n
names=['贵州茅台', '隆基股份', '五粮液']\n
start_date='2021-05-01'\n
end_date='2021-11-01'\n
frequency='d' --> d/w/m\n
rfr=0.023467/365\n
funds=10000000\n
    path --> default cache path is ".\\Suluoya cache\\"; pass False to disable caching
'''
def __init__(self, names=['比亚迪', '阳光电源', '璞泰来', '紫光国微', '盛新锂能'],
start_date='2021-05-01',
end_date='2021-11-01',
frequency='d',
rfr=0.023467,
funds=10000000,
path='.\\Markovitz cache\\'):
self.names = names
self.lens = len(names)
self.start_date = start_date
self.end_date = end_date
self.frequency = frequency
self.rfr = (rfr*100) / \
{'d': 365, 'w': 52, 'm': 30}[frequency]
self.funds = funds
self.path = path
if self.path:
makedir(self.path, '')
sprint('Initializing...')
if not self.path:
sd = StockData(names=self.names, start_date=self.start_date,
end_date=self.end_date, frequency=self.frequency)
self.datas = sd.stocks_data()
else:
try:
self.datas = pd.read_csv(
f'{self.path}\\stock data\\stocks_data.csv')
except:
sd = StockData(names=self.names, start_date=self.start_date,
end_date=self.end_date, frequency=self.frequency, path=self.path)
self.datas = sd.stocks_data()
self.datas.index = self.datas['name']
self.data = self.datas.reset_index(drop=True)
self.date = list(map(lambda x: str(x)[:10], self.data.date.unique()))
self.first_date = self.date[0]
self.last_date = self.date[-1]
        # opening price on the first day
self.first_price = self.data[self.data.date == self.data.date.unique(
)[0]][['open', 'name']].set_index('name').to_dict()['open']
        # closing price on the last day
self.last_price = self.data[self.data.date == self.data.date.unique(
)[-1]][['close', 'name']].set_index('name').to_dict()['close']
        # maximum number of lots (of 100 shares) per stock
self.max_shares_dict = {name: math.floor(
self.funds/(shares*100)) for name, shares in self.last_price.items()}
def weights(self, number=5000):
'''
        Generate random stock weights
'''
return np.random.dirichlet(np.ones(self.lens), size=number)
def calculate(self):
'''
        Calculate the mean returns, covariance matrix, and correlation matrix
'''
data = self.data[['date', 'name', 'pctChg']]
        # mean returns
data_mean = data.groupby('name').mean().T[self.names]
        # covariance matrix & correlation matrix
df = pd.DataFrame()
for name in self.names:
df[name] = list(data[data['name'] == name]['pctChg'])
data_cov = df.cov()
data_corr = df.corr()
if self.path:
makedir(self.path, 'mean,cov,corr')
data_mean.T.to_csv(
f'{self.path}\\mean,cov,corr\\data_mean.csv')
data_cov.to_csv(f'{self.path}\\mean,cov,corr\\data_cov.csv')
data_corr.to_csv(f'{self.path}\\mean,cov,corr\\data_corr.csv')
return {'mean': data_mean, 'cov': data_cov, 'correlation': data_corr}
def heatmap(self, show=True):
'''
        Heatmap of return correlation coefficients
'''
if self.path:
try:
                data_corr = pd.read_csv(f'{self.path}\\mean,cov,corr\\data_corr.csv').rename(
                    columns={'Unnamed: 0': 'correlation'}).set_index('correlation')
except:
data_corr = self.calculate()['correlation']
else:
data = self.data[['name', 'pctChg']]
df = pd.DataFrame()
for name in self.names:
df[name] = list(data[data['name'] == name]['pctChg'])
data_corr = df.corr()
        # plot the heatmap
# plt.subplots(figsize=(9, 9))
sns.heatmap(data_corr, annot=True, vmax=1,
square=True, cmap='Purples', cbar=False)
if show:
plt.show()
else:
plt.savefig(f'{self.path}\\heatmap.svg', format='svg')
def sharpe(self, weights):
'''
        Pass weights in the same order as names (weights must sum to 1)
'''
data_dict = self.calculate()
data_mean = data_dict['mean']
data_cov = data_dict['cov']
weights = np.array(weights)
rate = data_mean.dot(weights.T)['pctChg']
risk = np.sqrt(weights.dot(data_cov).dot(weights.T))
        return (self.rfr-rate)/risk  # negative Sharpe ratio (minimized by the optimizer)
def optimization(self):
'''
        Solve for the maximum Sharpe ratio and the corresponding weights via nonlinear programming
'''
opts = sco.minimize(fun=self.sharpe,
                            # stock weights, i.e. the weights argument of the sharpe function
x0=np.ones(self.lens)/self.lens,
bounds=tuple((0, 1)for x in range(self.lens)),
constraints={'type': 'eq',
'fun': lambda x: np.sum(x) - 1}
)
opt_dict = {'weights': dict(
zip(self.names, list(opts.x))), 'sharpe': -opts.fun}
if self.path:
df_opt = pd.DataFrame(opt_dict)
df_opt['sharpe'] = None
df_opt['sharpe'].iloc[0] = opt_dict['sharpe']
df_opt.to_csv(f'{self.path}\\max sharpe and weights.csv')
return opt_dict
def scatter_data(self, number=5000):
'''
        Scatter-point data; generates 5000 random portfolios by default
'''
data_dict = self.calculate()
data_mean = data_dict['mean']
data_cov = data_dict['cov']
weights = self.weights(number=number)
        # DataFrame of scatter points
        df_scatter = pd.DataFrame()
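# Standalone sketch of the constrained max-Sharpe solve performed by
# Markovitz.optimization(), with made-up mean returns, covariance matrix and
# risk-free rate so the scipy call can be read in isolation. Minimizing the
# negative Sharpe ratio subject to weights in [0, 1] that sum to 1 mirrors the
# method above; the numbers are purely illustrative.
def _demo_max_sharpe():
    mean_ret = np.array([0.08, 0.12, 0.10])      # hypothetical per-period returns (%)
    cov = np.array([[0.10, 0.02, 0.04],
                    [0.02, 0.08, 0.03],
                    [0.04, 0.03, 0.09]])          # hypothetical covariance matrix
    rfr = 0.02

    def neg_sharpe(w):
        rate = mean_ret.dot(w)
        risk = np.sqrt(w.dot(cov).dot(w))
        return (rfr - rate) / risk

    n = len(mean_ret)
    opts = sco.minimize(fun=neg_sharpe,
                        x0=np.ones(n) / n,
                        bounds=tuple((0, 1) for _ in range(n)),
                        constraints={'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
    return {'weights': opts.x, 'sharpe': -opts.fun}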
#!/usr/bin/env python
# coding: utf-8
import os
import logging
import argparse
import numpy as np
from io import StringIO
import pandas as pd
from model.vanilla import classification_model as cm
from sklearn.metrics import classification_report as class_report
from data_utils.testset import load_test_set
from data_utils.dataviz import test_confusion_matrix, samples_viz
from log import config_logger
from config import IN_SHAPE
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config_logger()
log = logging.getLogger('simpsons')
def test(model_path, weights, testset_path):
# read labels
with open(os.path.join(model_path, 'labels.txt'), 'r') as file:
content = file.read()
labels = content.split('\n')
file.close()
log.info(f'Labels loaded succesfully - n_classes: {len(labels)}')
# create model
model = cm(input_shape=IN_SHAPE, class_num=len(labels))
model.load_weights(weights)
log.info('Model loaded succesfully')
# load the test set
# we need the test labels for the classification report
# and the confusion matrix
X_test, y_test, t_labels, t_names = load_test_set(testset_path, labels)
y_pred = model.predict(np.array(X_test))
log.info(f'Test on: {len(t_names)} labels')
# get the test true labex index
# and the predictions argmax index
y_test_true = np.where(y_test > 0)[1]
y_pred_argmax = np.argmax(y_pred, axis=1)
# write results per image csv
# for every test image, we write:
# image_name / prediction label name / score
results_file_path = os.path.join(model_path, 'test_results.csv')
test_images = sorted(os.listdir(testset_path))
results = 'Image, Prediction, Score \n'
for i, image in enumerate(test_images):
pred_label = labels[y_pred_argmax[i]]
pred_score = max(y_pred[i])
results += (f'\n {image}, {pred_label}, {pred_score:.2}\n')
result_data = StringIO(results)
results_df = pd.read_csv(result_data, sep=',')
results_df.to_csv(results_file_path)
log.info(f'Results per pic written in {results_file_path}')
# write the classification report
# this report includes only the test labels
# which are less than the model labels
report_file_path = os.path.join(model_path, 'class_report.csv')
report = class_report(y_test_true,
y_pred_argmax,
labels=t_labels,
target_names=t_names,
zero_division=False,
output_dict=True)
    clsf_report = pd.DataFrame(report)
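# Minimal sketch (with toy labels) of the report-to-CSV step used above: sklearn's
# classification_report with output_dict=True returns a nested dict that pandas can
# turn into a table and write out. The label names and output path here are
# hypothetical placeholders, not values from the real test set.
def _demo_report_to_csv(out_path='class_report_demo.csv'):
    y_true = [0, 0, 1, 1, 2]
    y_pred = [0, 1, 1, 1, 2]
    report = class_report(y_true, y_pred,
                          target_names=['homer', 'marge', 'bart'],
                          zero_division=False,
                          output_dict=True)
    # transpose so each class becomes a row in the CSV
    pd.DataFrame(report).transpose().to_csv(out_path)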
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OVERVIEW: DEBRIS THICKNESS ESTIMATES BASED ON DEM DIFFERENCING BANDS
Objective: derive debris thickness using an iterative approach that solves for when the modeled melt rate agrees with
the DEM differencing
If using these methods, cite the following paper:
<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2018). Quantifying debris thickness of
debris-covered glaciers in the Everest region of Nepal through inversion of a subdebris melt model,
Journal of Geophysical Research: Earth Surface, 123(5):1095-1115, doi:10.1029/2017JF004395.
Notes for running the model:
The model uses relative paths to the input and output. This was done to make it easier to share the model and not
have to worry about changing the filenames. That said, the script must be run with the working directory set to the
location of the script.
Assumptions:
- Slope and aspect of pixel are not changing through time
- Surface lowering only considers flux divergence and surface mass
balance
- Climatic mass balance between Oct 15 - May 15 is zero, i.e., any snow
that has accumulated has melted
Limitations:
- Does not account for shading from surrounding terrain
- Does not account for snowfall thereby potentially overestimating melt
which would underestimate debris thickness
- Does not account for changes in debris thickness over time
Files required to run model successfully:
- DEM (raster)
- Slope (raster)
- Aspect (raster)
- Elevation change (raster)
- Elevation change (.csv each box with uncertainty)
- Glacier outline (raster - 1/0 for glacier/non-glacier pixels)
- Bands/boxes outlines (raster - each pixel is assigned value of box, 0 represents pixel outside of box)
- Emergence velocity (.csv each box with uncertainty)
- Meteorological Data (.csv)
Other important input data
- Lat/Long starting point (center of upper left pixel)
- Lat/Long pixel resolution in degrees
- Number of Monte Carlo simulations
- Time step (debris_prms.delta_t)
- Number of years modeled
- Number of iterations (will affect thickness resolution)
Monte Carlo simulation allows uncertainty to be incorporated into model performance. The debris properties that are
considered are: (1) albedo, (2) surface roughness, and (3) thermal conductivity. Additional uncertainty regarding
products are: (4) error associated with elevation change.
"""
# Built-in libaries
import argparse
import collections
#import datetime
import multiprocessing
import os
import pickle
import time
# External libraries
#import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import median_absolute_deviation
import xarray as xr
# Local libraries
import debrisglobal.globaldebris_input as debris_prms
#import globaldebris_input as input
from spc_split_lists import split_list
#%% FUNCTIONS
def getparser():
"""
Use argparse to add arguments from the command line
Parameters
----------
batchno (optional) : int
batch number used to differentiate output on supercomputer
batches (optional) : int
total number of batches based on supercomputer
num_simultaneous_processes (optional) : int
number of cores to use in parallels
option_parallels (optional) : int
switch to use parallels or not
debug (optional) : int
Switch for turning debug printing on or off (default = 0 (off))
Returns
-------
Object containing arguments and their respective values.
"""
parser = argparse.ArgumentParser(description="run simulations from gcm list in parallel")
# add arguments
parser.add_argument('-batchno', action='store', type=int, default=0,
help='Batch number used to differentiate output on supercomputer')
parser.add_argument('-batches', action='store', type=int, default=1,
help='Total number of batches (nodes) for supercomputer')
parser.add_argument('-latlon_fn', action='store', type=str, default=None,
help='Filename containing list of lat/lon tuples for running batches on spc')
parser.add_argument('-num_simultaneous_processes', action='store', type=int, default=4,
help='number of simultaneous processes (cores) to use')
parser.add_argument('-option_parallels', action='store', type=int, default=1,
help='Switch to use or not use parallels (1 - use parallels, 0 - do not)')
parser.add_argument('-option_ordered', action='store', type=int, default=1,
help='switch to keep lists ordered or not')
# parser.add_argument('-option_split_debris', action='store', type=int, default=1,
# help='switch to split the debris thicknesses into separate lists for parallel MC simulations')
parser.add_argument('-debug', action='store', type=int, default=0,
help='Boolean for debugging to turn it on or off (default 0 is off')
return parser
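# Illustrative sketch of how getparser() is typically consumed (not part of the
# original workflow): parse a hypothetical argument list and read the resulting
# attributes. The flag names match the parser defined above; the values are made up.
def _demo_getparser():
    parser = getparser()
    args = parser.parse_args(['-batchno', '2', '-batches', '10',
                              '-num_simultaneous_processes', '8', '-debug', '1'])
    return args.batchno, args.batches, args.num_simultaneous_processes, bool(args.debug)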
def pickle_data(fn, data):
"""Pickle data
Parameters
----------
fn : str
filename including filepath
data : list, etc.
data to be pickled
Returns
-------
.pkl file
saves .pkl file of the data
"""
with open(fn, 'wb') as f:
pickle.dump(data, f)
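# Counterpart sketch for reading data saved by pickle_data (assumes the file was
# written by the function above); added here only for illustration.
def _load_pickle(fn):
    """Load and return a pickled object from fn."""
    with open(fn, 'rb') as f:
        return pickle.load(f)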
def create_xrdataset(debris_thickness_all=debris_prms.debris_thickness_all, time_values=None,
elev_values=None, stat_cns=debris_prms.mc_stat_cns,
lat_deg=None, lon_deg=None, roi=debris_prms.roi):
"""
Create empty xarray dataset that will be used to record simulation runs.
Parameters
----------
main_glac_rgi : pandas dataframe
dataframe containing relevant rgi glacier information
dates_table : pandas dataframe
table of the dates, months, days in month, etc.
sim_iters : int
number of simulation runs included
stat_cns : list
list of strings containing statistics that will be used on simulations
record_stats : int
Switch to change from recording simulations to statistics
Returns
-------
output_ds_all : xarray Dataset
empty xarray dataset that contains variables and attributes to be filled in by simulation runs
encoding : dictionary
encoding used with exporting xarray dataset to netcdf
"""
# Create empty datasets for each variable and merge them
# Coordinate values
hd_cm_values = (debris_thickness_all*100).astype(int)
# Variable coordinates dictionary
output_coords_dict = collections.OrderedDict()
output_coords_dict['melt'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['ts'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['snow_depth'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
if 'std' in stat_cns:
output_coords_dict['melt_std'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['ts_std'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['snow_depth_std'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
if 'med' in stat_cns:
output_coords_dict['melt_med'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['ts_med'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['snow_depth_med'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
if 'mad' in stat_cns:
output_coords_dict['melt_mad'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['ts_mad'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
output_coords_dict['snow_depth_mad'] = collections.OrderedDict([('hd_cm', hd_cm_values), ('time', time_values),
('elev', elev_values)])
# Attributes dictionary
output_attrs_dict = {
'latitude': {'long_name': 'latitude',
'units': 'degrees north'},
'longitude': {'long_name': 'longitude',
'units': 'degrees_east'},
'roi': {'long_name': 'region of interest'},
'time': {'long_name': 'time'},
'hd_cm': {'long_name': 'debris thickness',
'units:': 'cm'},
'elev': {'long_name': 'elevation',
'units': 'm a.s.l.'},
'melt': {'long_name': 'glacier melt, in water equivalent',
'units': 'm'},
'ts': {'long_name': 'surface temperature',
'units': 'K'},
'snow_depth': {'long_name': 'snow depth',
'units': 'm'},
'melt_std': {'long_name': 'glacier melt, in water equivalent, standard deviation',
'units': 'm w.e.'},
'ts_std': {'long_name': 'surface temperature standard deviation',
'units': 'K'},
'snow_depth_std': {'long_name': 'snow depth standard deviation',
'units': 'm'},
'melt_med': {'long_name': 'glacier melt, in water equivalent, median',
'units': 'm w.e.'},
'ts_med': {'long_name': 'surface temperature median',
'units': 'K'},
'snow_depth_med': {'long_name': 'snow depth median',
'units': 'm'},
'melt_mad': {'long_name': 'glacier melt, in water equivalent, median absolute deviation',
'units': 'm w.e.'},
'ts_mad': {'long_name': 'surface temperature median absolute deviation',
'units': 'K'},
'snow_depth_mad': {'long_name': 'snow depth median absolute deviation',
'units': 'm'},
}
# Add variables to empty dataset and merge together
count_vn = 0
encoding = {}
for vn in output_coords_dict.keys():
count_vn += 1
empty_holder = np.zeros([len(output_coords_dict[vn][i]) for i in list(output_coords_dict[vn].keys())])
output_ds = xr.Dataset({vn: (list(output_coords_dict[vn].keys()), empty_holder)},
coords=output_coords_dict[vn])
# Merge datasets of stats into one output
if count_vn == 1:
output_ds_all = output_ds
else:
output_ds_all = xr.merge((output_ds_all, output_ds))
noencoding_vn = []
# Add attributes
for vn in output_ds_all.variables:
try:
output_ds_all[vn].attrs = output_attrs_dict[vn]
except:
pass
# Encoding (specify _FillValue, offsets, etc.)
if vn not in noencoding_vn:
encoding[vn] = {'_FillValue': False,
'zlib':True,
'complevel':9
}
# Add values
output_ds_all['latitude'] = lat_deg
output_ds_all['latitude'].attrs = output_attrs_dict['latitude']
output_ds_all['longitude'] = lon_deg
output_ds_all['longitude'].attrs = output_attrs_dict['longitude']
output_ds_all['time'].values = time_values
output_ds_all['hd_cm'].values = hd_cm_values
output_ds_all['elev'].values = elev_values
# Add attributes
output_ds_all.attrs = {'institution': 'University of Alaska Fairbanks, Fairbanks, AK',
'history': 'Created by <NAME> (<EMAIL>) on ' + debris_prms.date_start,
'references': 'doi:10.5194/tc-9-2295-2015'}
return output_ds_all, encoding
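# Minimal sketch of the pattern create_xrdataset() implements: build an empty xarray
# Dataset over the desired coordinates, attach attributes, and reuse a compressed
# encoding when writing to netCDF. The coordinate sizes and output filename below are
# made up for illustration and do not come from debris_prms.
def _demo_empty_dataset(out_fn='demo_ds.nc'):
    hd_cm = np.array([2, 10, 50])
    time_values = pd.date_range('2000-10-01', periods=4, freq='D')
    elev = np.array([4500, 5000])
    ds = xr.Dataset(
        {'melt': (('hd_cm', 'time', 'elev'),
                  np.zeros((hd_cm.size, time_values.size, elev.size)))},
        coords={'hd_cm': hd_cm, 'time': time_values, 'elev': elev})
    ds['melt'].attrs = {'long_name': 'glacier melt, in water equivalent', 'units': 'm'}
    encoding = {'melt': {'zlib': True, 'complevel': 9}}
    ds.to_netcdf(out_fn, encoding=encoding)
    return ds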
def solar_calcs_NOAA(year, julian_day_of_year, time_frac, longitude_deg, latitude_deg, nsteps):
""" NOAA calculations to determine the position of the sun and distance to sun
Sun position based on NOAA solar calculator
Earth-sun distance based on Stamnes (2015)
Parameters
----------
year : np.array
array of the year associated with each time step
julian_day_of_year : np.array
julian day of year associated with each time step
time_frac : np.array
time (hour + minute / 60) of each time step
longitude_deg : float
longitude in degrees
latitude_deg : float
latitude in degrees
Returns
-------
SolarZenithAngleCorr_rad : np.array
Solar zenith angle [radians] corrected for atmospheric refraction
SolarAzimuthAngle_rad : np.array
Solar azimuth angle [radians] based on degrees clockwise from north
rm_r2 : np.array
Squared mean earth-sun distance normalized by instantaneous earth-sun distance
"""
julianday_NOAA = np.zeros((nsteps))
julianCentury = np.zeros((nsteps))
GeomMeanLongSun_deg = np.zeros((nsteps))
GeomMeanLongSun_rad = np.zeros((nsteps))
GeomMeanAnomSun_deg = np.zeros((nsteps))
GeomMeanAnomSun_rad = np.zeros((nsteps))
EccentEarthOrbit = np.zeros((nsteps))
SunEqofCtr = np.zeros((nsteps))
SunTrueLong_deg = np.zeros((nsteps))
SunAppLong_deg = np.zeros((nsteps))
SunAppLong_rad = np.zeros((nsteps))
MeanObliqEcliptic_deg = np.zeros((nsteps))
ObliqCorr_deg = np.zeros((nsteps))
ObliqCorr_rad = np.zeros((nsteps))
SunDeclin_deg = np.zeros((nsteps))
SunDeclin_rad = np.zeros((nsteps))
VarY = np.zeros((nsteps))
EqofTime = np.zeros((nsteps))
TrueSolarTime = np.zeros((nsteps))
HourAngle_deg = np.zeros((nsteps))
HourAngle_rad = np.zeros((nsteps))
SolarZenithAngle_deg = np.zeros((nsteps))
SolarZenithAngle_rad = np.zeros((nsteps))
SolarElevationAngle_deg = np.zeros((nsteps))
SolarElevationAngle_rad = np.zeros((nsteps))
ApproxAtmosRefrac_deg = np.zeros((nsteps))
SolarElevationAngleCorr_deg = np.zeros((nsteps))
SolarZenithAngleCorr_deg = np.zeros((nsteps))
SolarZenithAngleCorr_rad = np.zeros((nsteps))
SolarAzimuthAngle_deg = np.zeros((nsteps))
SolarAzimuthAngle_rad = np.zeros((nsteps))
rm_r2 = np.zeros((nsteps))
# Julian day
# +1 accounts for the fact that day 1 is January 1, 1900
# 2415018.5 converts from 1900 to NOAA Julian day of year
julianday_NOAA = (np.floor(365.25*(year-1900)+1) + julian_day_of_year + (time_frac-debris_prms.timezone)/24 +
2415018.5)
# Julian Century
julianCentury = (julianday_NOAA-2451545) / 36525
# Geom Mean Long Sun
GeomMeanLongSun_deg = (280.46646 + julianCentury * (36000.76983 + julianCentury*0.0003032)) % 360
GeomMeanLongSun_rad = GeomMeanLongSun_deg * np.pi/180
# Geom Mean Anom Sun
GeomMeanAnomSun_deg = 357.52911 + julianCentury * (35999.05029 - 0.0001537*julianCentury)
GeomMeanAnomSun_rad = GeomMeanAnomSun_deg * np.pi/180
# Eccent Earth Orbit
EccentEarthOrbit = 0.016708634 - julianCentury * (0.000042037 + 0.0000001267*julianCentury)
# Sun Eq of Ctr
SunEqofCtr = (np.sin(GeomMeanAnomSun_rad) * (1.914602 - julianCentury * (0.004817 + 0.000014*julianCentury)) +
np.sin(2 * GeomMeanAnomSun_rad) * (0.019993 - 0.000101*julianCentury) +
np.sin(3 * GeomMeanAnomSun_rad) * 0.000289)
# Sun True Long
SunTrueLong_deg = GeomMeanLongSun_deg + SunEqofCtr
# Sun True Anom
#SunTrueAnom_deg = GeomMeanAnomSun_deg + SunEqofCtr
# Sun Rad Vector [AUs]
#SunRadVector = ((1.000001018 * (1 - EccentEarthOrbit * EccentEarthOrbit)) /
# (1 + EccentEarthOrbit * np.cos(SunTrueAnom_rad)))
# Sun App Long
SunAppLong_deg = SunTrueLong_deg - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136*julianCentury) * np.pi/180)
SunAppLong_rad = SunAppLong_deg * np.pi/180
# Mean Obliq Ecliptic
MeanObliqEcliptic_deg = (23 + (26 + ((21.448 - julianCentury * (46.815 + julianCentury * (0.00059 -
julianCentury * 0.001813)))) / 60) / 60)
# Obliq Corr
ObliqCorr_deg = MeanObliqEcliptic_deg + 0.00256 * np.cos((125.04 - 1934.136*julianCentury) * np.pi/180)
ObliqCorr_rad = ObliqCorr_deg * np.pi/180
# Sun Rt Ascen
#SunRtAscen_deg = (180/np.pi * np.arctan((np.cos(ObliqCorr_rad) * np.sin(SunAppLong_rad)) /
# np.cos(SunAppLong_rad)))
# Sun Declin
SunDeclin_deg = 180/np.pi * np.arcsin(np.sin(ObliqCorr_rad) * np.sin(SunAppLong_rad))
SunDeclin_rad = SunDeclin_deg * np.pi/180
# VarY
VarY = np.tan(ObliqCorr_deg / 2 * np.pi/180) * np.tan(ObliqCorr_deg / 2 * np.pi/180)
# Eq of Time [min]
EqofTime = (4 * 180/np.pi * (VarY * np.sin(2 * GeomMeanLongSun_rad) - 2 * EccentEarthOrbit *
np.sin(GeomMeanAnomSun_rad) + 4 * EccentEarthOrbit * VarY * np.sin(GeomMeanAnomSun_rad) *
np.cos(2 * GeomMeanLongSun_rad) - 0.5 * VarY * VarY * np.sin(4 * GeomMeanLongSun_rad) - 1.25 *
EccentEarthOrbit * EccentEarthOrbit * np.sin(2 * GeomMeanAnomSun_rad)))
# True Solar Time [min]
TrueSolarTime = (time_frac*60*1440 + time_frac*60 + EqofTime + 4*longitude_deg - 60*debris_prms.timezone) % 1440
# Hour Angle
HourAngle_deg[TrueSolarTime/4 < 0] = TrueSolarTime[TrueSolarTime/4 < 0] / 4 + 180
HourAngle_deg[TrueSolarTime/4 >= 0] = TrueSolarTime[TrueSolarTime/4 >= 0] / 4 - 180
HourAngle_rad = HourAngle_deg * np.pi/180
# Solar Zenith Angle (deg)
SolarZenithAngle_deg = (180/np.pi * np.arccos(np.sin(latitude_deg * np.pi/180) * np.sin(SunDeclin_rad) +
np.cos(latitude_deg * np.pi/180) * np.cos(SunDeclin_rad) * np.cos(HourAngle_rad)))
SolarZenithAngle_rad = SolarZenithAngle_deg * np.pi/180
# Solar Elevation Angle (deg)
SolarElevationAngle_deg = 90 - SolarZenithAngle_deg
SolarElevationAngle_rad = SolarElevationAngle_deg * np.pi/180
# Approx Atmospheric Refraction (deg)
ApproxAtmosRefrac_deg = -20.772 / np.tan(SolarElevationAngle_rad)
ApproxAtmosRefrac_deg[SolarElevationAngle_deg > 85] = 0
mask = np.where((SolarElevationAngle_deg > 5) & (SolarElevationAngle_deg <= 85))[0]
ApproxAtmosRefrac_deg[mask] = (
58.1 / np.tan(SolarElevationAngle_rad[mask]) - 0.07 / ((np.tan(SolarElevationAngle_rad[mask]))**3) +
0.000086 / ((np.tan(SolarElevationAngle_rad[mask]))**5))
mask = np.where((SolarElevationAngle_deg > -0.575) & (SolarElevationAngle_deg <= 5))[0]
ApproxAtmosRefrac_deg[mask] = (
1735 + SolarElevationAngle_deg[mask] * (-518.2 + SolarElevationAngle_deg[mask] *
(103.4 + SolarElevationAngle_deg[mask] * (-12.79 + SolarElevationAngle_deg[mask]*0.711))))
ApproxAtmosRefrac_deg = ApproxAtmosRefrac_deg / 3600
# Solar Elevation Correct for Atm Refraction
SolarElevationAngleCorr_deg = SolarElevationAngle_deg + ApproxAtmosRefrac_deg
# Solar Zenith Angle Corrected for Atm Refraction
SolarZenithAngleCorr_deg = 90 - SolarElevationAngleCorr_deg
SolarZenithAngleCorr_rad = SolarZenithAngleCorr_deg * np.pi/180
# Solar Azimuth Angle (deg CW from N)
SolarAzimuthAngle_deg[HourAngle_deg > 0] = (
((180/np.pi * (np.arccos(np.round(((np.sin(latitude_deg * np.pi/180) *
np.cos(SolarZenithAngle_rad[HourAngle_deg > 0])) - np.sin(SunDeclin_rad[HourAngle_deg > 0])) /
(np.cos(latitude_deg * np.pi/180) * np.sin(SolarZenithAngle_rad[HourAngle_deg > 0])),12))) + 180) / 360 -
np.floor((180/np.pi * (np.arccos(np.round(((np.sin(latitude_deg * np.pi/180) *
np.cos(SolarZenithAngle_rad[HourAngle_deg > 0])) - np.sin(SunDeclin_rad[HourAngle_deg > 0])) /
(np.cos(latitude_deg * np.pi/180) * np.sin(SolarZenithAngle_rad[HourAngle_deg > 0])),12))) + 180) / 360))
* 360)
SolarAzimuthAngle_deg[HourAngle_deg <= 0] = (
((540 - 180/np.pi * (np.arccos(np.round(((np.sin(latitude_deg * np.pi/180) *
np.cos(SolarZenithAngle_rad[HourAngle_deg <= 0])) - np.sin(SunDeclin_rad[HourAngle_deg <= 0])) /
(np.cos(latitude_deg * np.pi/180) * np.sin(SolarZenithAngle_rad[HourAngle_deg <= 0])),12)))) / 360 -
np.floor((540 - 180/np.pi * (np.arccos(np.round(((np.sin(latitude_deg * np.pi/180) *
np.cos(SolarZenithAngle_rad[HourAngle_deg <= 0])) - np.sin(SunDeclin_rad[HourAngle_deg <= 0])) /
(np.cos(latitude_deg * np.pi/180) * np.sin(SolarZenithAngle_rad[HourAngle_deg <= 0])),12)))) / 360)) * 360)
SolarAzimuthAngle_rad = SolarAzimuthAngle_deg * np.pi/180
# Distance from sun based on eccentricity of orbit (r/rm)^2 based on Stamnes (2015)
# Day number [radians]
dn_rad = julian_day_of_year * 2 * np.pi / 365
rm_r2 = (1 / (1.000110 + 0.034221 * np.cos(dn_rad) + 0.001280 * np.sin(dn_rad) + 0.000719 *
np.cos(2 * dn_rad) + 0.000077 * np.sin(2 * dn_rad)))**2
return SolarZenithAngleCorr_rad, SolarAzimuthAngle_rad, rm_r2
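# --- Illustrative sketch (not part of the original model) ---
# The Earth-Sun distance factor computed at the end of the routine above is easy to check
# in isolation. The helper below simply re-applies the same truncated Fourier series used
# above (after Stamnes, 2015) for a given day of year; the name _demo_rm_r2 is ours and is
# not used elsewhere in the code.
def _demo_rm_r2(julian_day_of_year):
    """Return the squared Earth-Sun distance ratio used to scale potential irradiance."""
    dn_rad = julian_day_of_year * 2 * np.pi / 365
    return (1 / (1.000110 + 0.034221 * np.cos(dn_rad) + 0.001280 * np.sin(dn_rad) +
                 0.000719 * np.cos(2 * dn_rad) + 0.000077 * np.sin(2 * dn_rad)))**2
# Example: _demo_rm_r2(np.arange(1, 366)) gives the factor for every day of a year.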
def CrankNicholson(Td, Tair, i, debris_thickness, N, h, C, a_Crank, b_Crank, c_Crank, d_Crank, A_Crank, S_Crank):
""" Run Crank-Nicholson scheme to obtain debris temperature
Parameters
----------
Td : np.array
        debris temperature [K] (rows = internal layers, columns = timestep)
Tair : np.array
air temperature [K]
i : int
step number
debris_thickness : float
debris thickness [m]
N : int
number of layers
h : float
height of debris layers [m]
C : float
constant defined by Reid and Brock (2010) for Crank-Nicholson Scheme
    a_Crank, b_Crank, c_Crank, d_Crank, A_Crank, S_Crank : np.array
        coefficient and elimination arrays for the tridiagonal solution (equations A8-A12 in Reid and Brock, 2010)
Returns
-------
Td : np.array
        updated debris temperature [K] (rows = internal layers, columns = timestep)
"""
# Calculate temperature profile in the debris
    # For t = 0 (i.e., i = 0), assume an initial condition of a linear temperature profile in the debris
if i == 0:
Td_gradient = (Td[0,0] - Td[N-1,0])/debris_thickness
# CODE IMPROVEMENT HERE: TD CALCULATION SKIPPED ONE
for j in np.arange(1,N-1):
Td[j,0] = Td[0,0] - (j*h)*Td_gradient
else:
# Perform Crank-Nicholson Scheme
for j in np.arange(1,N-1):
# Equations A8 in Reid and Brock (2010)
a_Crank[j,i] = C
b_Crank[j,i] = 2*C+1
c_Crank[j,i] = C
# Equations A9 in Reid and Brock (2010)
if j == 1:
d_Crank[j,i] = C*Td[0,i] + C*Td[0,i-1] + (1-2*C)*Td[j,i-1] + C*Td[j+1,i-1]
elif j < (N-2):
d_Crank[j,i] = C*Td[j-1,i-1] + (1-2*C)*Td[j,i-1] + C*Td[j+1,i-1]
elif j == (N-2):
d_Crank[j,i] = 2*C*Td[N-1,i] + C*Td[N-3,i-1] + (1-2*C)*Td[N-2,i-1]
# note notation:
# "i-1" refers to the past
# "j-1" refers to the cell above it
# "j+1" refers to the cell below it
# Equations A10 and A11 in Reid and Brock (2010)
if j == 1:
A_Crank[j,i] = b_Crank[j,i]
S_Crank[j,i] = d_Crank[j,i]
else:
A_Crank[j,i] = b_Crank[j,i] - a_Crank[j,i] / A_Crank[j-1,i] * c_Crank[j-1,i]
S_Crank[j,i] = d_Crank[j,i] + a_Crank[j,i] / A_Crank[j-1,i] * S_Crank[j-1,i]
# Equations A12 in Reid and Brock (2010)
for j in np.arange(N-2,0,-1):
if j == (N-2):
Td[j,i] = S_Crank[j,i] / A_Crank[j,i]
else:
Td[j,i] = 1 / A_Crank[j,i] * (S_Crank[j,i] + c_Crank[j,i] * Td[j+1,i])
return Td
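# --- Illustrative sketch (not part of the original model) ---
# Equations A10-A12 above are a forward-elimination / back-substitution (Thomas algorithm)
# for a tridiagonal system. The standalone helper below shows the same algorithm in the
# conventional a/b/c/d form; CrankNicholson folds the minus signs of its off-diagonal
# terms into the '+' signs of its A_Crank/S_Crank recursions, but the structure is identical.
def _demo_thomas_solver(a, b, c, d):
    """Solve a[j]*x[j-1] + b[j]*x[j] + c[j]*x[j+1] = d[j]; a[0] and c[-1] are ignored."""
    n = len(d)
    c_prime = np.zeros(n)
    d_prime = np.zeros(n)
    c_prime[0] = c[0] / b[0]
    d_prime[0] = d[0] / b[0]
    # forward elimination
    for j in range(1, n):
        denom = b[j] - a[j] * c_prime[j-1]
        c_prime[j] = c[j] / denom if j < n - 1 else 0.0
        d_prime[j] = (d[j] - a[j] * d_prime[j-1]) / denom
    # back substitution
    x = np.zeros(n)
    x[-1] = d_prime[-1]
    for j in range(n - 2, -1, -1):
        x[j] = d_prime[j] - c_prime[j] * x[j+1]
    return x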
def calc_surface_fluxes(Td_i, Tair_i, RH_AWS_i, u_AWS_i, Sin_i, Lin_AWS_i, Rain_AWS_i, snow_i, P, Albedo, k,
a_neutral_debris, h, dsnow_t0, tsnow_t0, snow_tau_t0, ill_angle_rad_i, a_neutral_snow,
debris_thickness,
option_snow=0, option_snow_fromAWS=0, i_step=None):
""" Calculate surface energy fluxes for timestep i
Snow model uses a modified version of Tarboten and Luce (1996) to compute fluxes
- Sin calculated above, not with their local slope and illumination corrections though
they are likely similar
- Ground heat flux is computed from the debris, not using their estimates from diurnal
soil temperatures
- Do not use their temperature threshold for snowfall, but use set value
- For P_flux, we do not include the snow fall component because it alters the cold content of the snow pack
therefore we don't want to double count this energy
- Do not account for wind redistribution of snow, which they state is site specific
- Do not iterate to solve snow temperature, but do a depth average
- Solve for the thermal conductivity at the debris/ice interface using the depth of snow and debris height
Limitation:
    - If all of the negative energy is allowed to cool the snowpack and the snowpack is very thin (< 1 cm), then the
change in temperature can be extreme (-10 to -1000s of degrees), which is unrealistic.
More realistic is that the snowpack may change its temperature and the remaining energy will be
transferred to also cool the debris layer. Set maximum temperature change of the snow pack during any given
time step to 1 degC.
Note: since partitioning rain into snow, units are automatically m w.e.
hence, the density of snow is not important
Future work:
    - Currently, the debris/ice interface is set to 273.15.
      This is fine during the ablation season; however, the assumption breaks down when the debris freezes in the winter
- Snow melt water should theoretically percolate into the debris and transfer energy
Parameters
----------
Td_i : np.array
debris temperature
Tair_i, RH_AWS_i, u_AWS_i, Sin_i, Lin_AWS_i, Rain_AWS_i, snow_i : floats
meteorological data
P : float
pressure [Pa]
Albedo, k, a_neutral_debris : floats
debris albedo, thermal conductivity, and turbulent heat flux transfer coefficient (from surface roughness)
h : float
debris layer height [m]
dsnow_t0, tsnow_t0, snow_tau_t0
snow depth, temperature and dimensionless age at start of time step before any snow or melt has occurred
ill_angle_rad_i : float
solar illumination angle used to adjust snow albedo
a_neutral_snow : float
snow turbulent heat flux transfer coefficient (based on surface roughness)
option_snow : int
switch to use snow model (1) or not (0)
option_snow_fromAWS : int
switch to use snow depth (1) instead of snow fall (0)
Returns
-------
F_Ts_i, Rn_i, LE_i, H_i, P_flux_i, Qc_i : floats
Energy fluxes [W m-2]
dF_Ts_i, dRn_i, dLE_i, dH_i, dP_flux_i, dQc_i : floats
Derivatives of energy fluxes
dsnow_i : float
Snow depth [mwe] at end of time step
tsnow_i : float
Snow temperature at end of time step
snow_tau_i : float
Non-dimensional snow age at end of time step
"""
# Snow depth [m w.e.]
dsnow_i = dsnow_t0 + snow_i
snow_tau_i = snow_tau_t0
tsnow_i = 273.15
# First option: Snow depth is based on snow fall, so need to melt snow
if dsnow_i > 0 and option_snow==1 and option_snow_fromAWS == 0:
tsnow_i = (dsnow_t0 * tsnow_t0 + snow_i * Tair_i) / dsnow_i
# Thermal conductivity at debris/snow interface assuming conductance resistance is additive
# estimating the heat transfer through dsnow_eff layer of snow and h_eff layer of debris
# Tarboten and Luce (1996) use effective soil depth of 0.4 m for computing the ground heat transfer
if debris_thickness < 0.4:
h_eff = debris_thickness
else:
h_eff = 0.4
if dsnow_i < 0.4:
dsnow_eff = dsnow_i
else:
dsnow_eff = 0.4
k_snow_interface = (h_eff + dsnow_eff) / (dsnow_eff/debris_prms.k_snow + h_eff/k)
# Previously estimating it based on equal parts
#k_snow_interface = h / ((0.5 * h) / debris_prms.k_snow + (0.5*h) / k)
# Density of air (dry) based on pressure (elevation) and temperature
# used in snow calculations, which has different parameterization of turbulent fluxes
# compared to the debris
density_air = P / (287.058 * Tair_i)
# Albedo
# parameters representing grain growth due to vapor diffusion (r1), additional effect near
# and at freezing point due to melt and refreeze (r2), and the effect of dirt and soot (r3)
snow_r1 = np.exp(5000 * (1 / 273.16 - 1 / tsnow_i))
snow_r2 = np.min([snow_r1**10, 1])
snow_r3 = 0.03 # change to 0.01 if in Antarctica
# change in non-dimensional snow surface age
snow_tau_i += (snow_r1 + snow_r2 + snow_r3) / debris_prms.snow_tau_0 * debris_prms.delta_t
        # new snow effect on snow age
if snow_i > 0.01:
snow_tau_i = 0
elif snow_i > 0:
snow_tau_i = snow_tau_i * (1 - 100 * snow_i)
# snow age
snow_age = snow_tau_i / (1 + snow_tau_i)
# albedo as a function of snow age and band
albedo_vd = (1 - debris_prms.snow_c_v * snow_age) * debris_prms.albedo_vo
albedo_ird = (1 - debris_prms.snow_c_ir * snow_age) * debris_prms.albedo_iro
# increase in albedo based on illumination angle
# illumination angle measured relative to the surface normal
if np.cos(ill_angle_rad_i) < 0.5:
b_ill = 2
f_psi = 1/b_ill * ((1 + b_ill) / (1 + 2 * b_ill * np.cos(ill_angle_rad_i)) - 1)
else:
f_psi = 0
albedo_v = albedo_vd + 0.4 * f_psi * (1 - albedo_vd)
albedo_ir = albedo_ird + 0.4 * f_psi * (1 - albedo_ird)
albedo_snow = np.mean([albedo_v, albedo_ir])
# Adjustments to albedo
# ensure albedo is within bounds
if albedo_snow > 0.9:
albedo_snow = 0.9
elif albedo_snow < 0:
albedo_snow = 0
# if snow less than 0.1 m, then underlying debris influences albedo
if dsnow_i < 0.1:
r_adj = (1 - dsnow_i/0.1)*np.exp(dsnow_i / (2*0.1))
albedo_snow = r_adj * Albedo + (1 - r_adj) * albedo_snow
# Snow Energy Balance
Rn_snow = (Sin_i * (1 - albedo_snow) + debris_prms.emissivity_snow * (Lin_AWS_i -
(debris_prms.stefan_boltzmann * tsnow_i**4)))
H_snow = a_neutral_snow * density_air * debris_prms.cA * u_AWS_i * (Tair_i - tsnow_i)
# Vapor pressure above snow assumed to be saturated
        # Vapor pressure (e, Pa) computed using the Clausius-Clapeyron equation and relative humidity
# 611 is the vapor pressure of ice and liquid water at melting temperature (273.15 K)
eZ_Saturated = 611 * np.exp(debris_prms.Lv / debris_prms.R_const * (1 / 273.15 - 1 / Tair_i))
eZ = RH_AWS_i * eZ_Saturated
# Vapor pressure of snow based on temperature (Colbeck, 1990)
e_snow = debris_prms.eS_snow * np.exp(2838 * (tsnow_i - 273.15) / (0.4619 * tsnow_i * 273.15))
if e_snow > debris_prms.eS_snow:
e_snow = debris_prms.eS_snow
LE_snow = 0.622 * debris_prms.Ls / (debris_prms.Rd * Tair_i) * a_neutral_snow * u_AWS_i * (eZ - e_snow)
Pflux_snow = (Rain_AWS_i * (debris_prms.Lf * debris_prms.density_water + debris_prms.cW *
debris_prms.density_water *
(np.max([273.15, Tair_i]) - 273.15)) / debris_prms.delta_t)
Qc_snow_debris = k_snow_interface * (Td_i[0] - tsnow_i)/h
# Net energy available for snow depends on latent heat flux
# if Positive LE: Air > snow vapor pressure (condensation/resublimation)
# energy released and available to melt the snow (include LE in net energy)
if LE_snow > 0:
Fnet_snow = Rn_snow + H_snow + LE_snow + Pflux_snow + Qc_snow_debris
snow_sublimation = 0
# if Negative LE: Air < snow vapor pressure (sublimation/evaporation)
# energy consumed and snow sublimates (do not include LE in net energy)
else:
Fnet_snow = Rn_snow + H_snow + Pflux_snow + Qc_snow_debris
# Snow sublimation [m w.e.]
snow_sublimation = -1 * LE_snow / (debris_prms.density_water * debris_prms.Lv) * debris_prms.delta_t
        # Cold content of snow [W m-2]
Qcc_snow = debris_prms.cSnow * debris_prms.density_water * dsnow_i * (273.15 - tsnow_i) / debris_prms.delta_t
# Max energy spent cooling snowpack based on 1 degree temperature change
Qcc_snow_neg1 = -1 * debris_prms.cSnow * debris_prms.density_water * dsnow_i / debris_prms.delta_t
# If Fnet_snow is positive and greater than cold content, then energy is going to warm the
# snowpack to melting point and begin melting the snow.
if Fnet_snow > Qcc_snow:
# Snow warmed up to melting temperature
tsnow_i = 273.15
Fnet_snow -= Qcc_snow
Fnet_snow2debris = 0
elif Fnet_snow < Qcc_snow_neg1:
# Otherwise only changes the temperature in the snowpack and the debris
# limit the change in snow temperature
tsnow_i -= 1
# Remaining energy goes to cool down the debris
Fnet_snow2debris = Fnet_snow - Qcc_snow_neg1
Fnet_snow = 0
# Set maximum energy to cool debris top layer by 1 degree
# otherwise, this can become very unstable since the turbulent heat fluxes are set by the snow surfaces
Fnet_snow2debris_max = -1* debris_prms.c_d * debris_prms.row_d * h / debris_prms.delta_t
if Fnet_snow2debris < Fnet_snow2debris_max:
Fnet_snow2debris = Fnet_snow2debris_max
else:
# Otherwise only changes the temperature
tsnow_i += Fnet_snow / (debris_prms.cSnow * debris_prms.density_water * dsnow_i) * debris_prms.delta_t
Fnet_snow = 0
Fnet_snow2debris = 0
# Snow melt [m snow] with remaining energy, if any
snow_melt_energy = Fnet_snow / (debris_prms.density_water * debris_prms.Lf) * debris_prms.delta_t
# Total snow melt
snow_melt = snow_melt_energy + snow_sublimation
# Snow depth [m w.e.]
dsnow_i -= snow_melt
if dsnow_i < 0:
dsnow_i = 0
if dsnow_i == 0:
snow_tau_i = 0
# Solve for temperature in debris
# Rn, LE, H, and P equal 0
Rn_i = 0
LE_i = 0
H_i = 0
Qc_i = k * (Td_i[1] - Td_i[0]) / h
P_flux_i = 0
Qc_snow_i = -Qc_snow_debris
F_Ts_i = Rn_i + LE_i + H_i + Qc_i + P_flux_i + Qc_snow_i + Fnet_snow2debris
dRn_i = 0
dLE_i = 0
dH_i = 0
dQc_i = -k/h
dP_flux_i = 0
dQc_snow_i = -k_snow_interface/h
dF_Ts_i = dRn_i + dLE_i + dH_i + dQc_i + dP_flux_i + dQc_snow_i
# Second option: Snow depth is prescribed from AWS, so don't need to melt snow
elif dsnow_i > 0 and option_snow==1 and option_snow_fromAWS == 1:
dsnow_i = snow_i
tsnow_i = Tair_i
if tsnow_i > 273.15:
tsnow_i = 273.15
# Thermal conductivity at debris/snow interface assuming conductance resistance is additive
# estimating the heat transfer through dsnow_eff layer of snow and h_eff layer of debris
# Tarboten and Luce (1996) use effective soil depth of 0.4 m for computing the ground heat transfer
if debris_thickness < 0.4:
h_eff = debris_thickness
else:
h_eff = 0.4
if dsnow_i < 0.4:
dsnow_eff = dsnow_i
else:
dsnow_eff = 0.4
k_snow_interface = (h_eff + dsnow_eff) / (dsnow_eff/debris_prms.k_snow + h_eff/k)
Qc_snow_debris = k_snow_interface * (Td_i[0] - tsnow_i)/h
# Solve for temperature in debris
# Rn, LE, H, and P equal 0
Rn_i = 0
LE_i = 0
H_i = 0
Qc_i = k * (Td_i[1] - Td_i[0]) / h
P_flux_i = 0
Qc_snow_i = -Qc_snow_debris
Fnet_snow2debris = 0
F_Ts_i = Rn_i + LE_i + H_i + Qc_i + P_flux_i + Qc_snow_i
dRn_i = 0
dLE_i = 0
dH_i = 0
dQc_i = -k/h
dP_flux_i = 0
dQc_snow_i = -k_snow_interface/h
dF_Ts_i = dRn_i + dLE_i + dH_i + dQc_i + dP_flux_i + dQc_snow_i
else:
# Debris-covered glacier Energy Balance (no snow)
if Rain_AWS_i > 0:
            # Vapor pressure (e, Pa) computed using the Clausius-Clapeyron equation and relative humidity
# 611 is the vapor pressure of ice and liquid water at melting temperature (273.15 K)
# if raining, assume the surface is saturated
eS_Saturated = 611 * np.exp(-debris_prms.Lv / debris_prms.R_const * (1 / Td_i[0] - 1 / 273.15))
eS = eS_Saturated
eZ_Saturated = 611 * np.exp(-debris_prms.Lv / debris_prms.R_const * (1 / Tair_i - 1 / 273.15))
eZ = RH_AWS_i * eZ_Saturated
LE_i = (0.622 * debris_prms.density_air_0 / debris_prms.P0 * debris_prms.Lv * a_neutral_debris * u_AWS_i
* (eZ -eS))
else:
LE_i = 0
Rn_i = Sin_i * (1 - Albedo) + debris_prms.emissivity * (Lin_AWS_i - (5.67e-8 * Td_i[0]**4))
H_i = (debris_prms.density_air_0 * (P / debris_prms.P0) * debris_prms.cA * a_neutral_debris * u_AWS_i *
(Tair_i - Td_i[0]))
P_flux_i = debris_prms.density_water * debris_prms.cW * Rain_AWS_i / debris_prms.delta_t * (Tair_i - Td_i[0])
Qc_i = k * (Td_i[1] - Td_i[0]) / h
F_Ts_i = Rn_i + LE_i + H_i + Qc_i + P_flux_i
# Derivatives
if Rain_AWS_i > 0:
dLE_i = (-0.622 * debris_prms.density_air_0 / debris_prms.P0 * debris_prms.Lv * a_neutral_debris *
u_AWS_i * 611 * np.exp(-debris_prms.Lv / debris_prms.R_const * (1 / Td_i[0] - 1 / 273.15))
* (debris_prms.Lv / debris_prms.R_const * Td_i[0]**-2))
else:
dLE_i = 0
dRn_i = -4 * debris_prms.emissivity * 5.67e-8 * Td_i[0]**3
dH_i = -1 * debris_prms.density_air_0 * P / debris_prms.P0 * debris_prms.cA * a_neutral_debris * u_AWS_i
dP_flux_i = -debris_prms.density_water * debris_prms.cW * Rain_AWS_i/ debris_prms.delta_t
dQc_i = -k / h
dF_Ts_i = dRn_i + dLE_i + dH_i + dQc_i + dP_flux_i
return (F_Ts_i, Rn_i, LE_i, H_i, P_flux_i, Qc_i, dF_Ts_i, dRn_i, dLE_i, dH_i, dP_flux_i, dQc_i,
dsnow_i, tsnow_i, snow_tau_i)
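# --- Illustrative sketch (not part of the original model) ---
# calc_surface_fluxes returns both the net flux F_Ts and its derivative dF_Ts with respect
# to the surface temperature. The driver that consumes them is not shown in this excerpt;
# the sketch below assumes the usual Newton-Raphson closure of the surface energy balance
# (as in Reid and Brock, 2010). f_and_dfdT is a hypothetical wrapper that re-evaluates the
# fluxes for a trial surface temperature and returns (F_Ts, dF_Ts).
def _demo_newton_surface_temperature(f_and_dfdT, Ts_init=273.15, tol=0.01, max_iter=100):
    """Iterate the surface temperature until the energy balance closes (|step| < tol)."""
    Ts = Ts_init
    for _ in range(max_iter):
        F, dF = f_and_dfdT(Ts)
        step = F / dF
        Ts = Ts - step
        if abs(step) < tol:
            break
    return Ts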
def calc_surface_fluxes_cleanice(Tair_i, RH_AWS_i, u_AWS_i, Sin_i, Lin_AWS_i, Rain_AWS_i, snow_i, P, Albedo,
a_neutral_ice, dsnow_t0, tsnow_t0, snow_tau_t0, ill_angle_rad_i, a_neutral_snow,
option_snow=0, option_snow_fromAWS=0, i_step=None):
""" Calculate surface energy fluxes for timestep i
Snow model uses a modified version of Tarboten and Luce (1996) to compute fluxes
- Sin calculated above, not with their local slope and illumination corrections though
they are likely similar
- Ground heat flux is computed from the debris, not using their estimates from diurnal
soil temperatures
- Do not use their temperature threshold for snowfall, but use set value
- For P_flux, we do not include the snow fall component because it alters the cold content of the snow pack
therefore we don't want to double count this energy
- Do not account for wind redistribution of snow, which they state is site specific
- Do not iterate to solve snow temperature, but do a depth average
- Solve for the thermal conductivity at the debris/ice interface using the depth of snow and debris height
Limitation:
    - If all of the negative energy is allowed to cool the snowpack and the snowpack is very thin (< 1 cm), then the
change in temperature can be extreme (-10 to -1000s of degrees), which is unrealistic.
More realistic is that the snowpack may change its temperature and the remaining energy will be
transferred to also cool the debris layer. Set maximum temperature change of the snow pack during any given
time step to 1 degC.
Note: since partitioning rain into snow, units are automatically m w.e.
hence, the density of snow is not important
Future work:
    - Currently, the debris/ice interface is set to 273.15.
      This is fine during the ablation season; however, the assumption breaks down when the debris freezes in the winter
- Snow melt water should theoretically percolate into the debris and transfer energy
Parameters
----------
Tair_i, RH_AWS_i, u_AWS_i, Sin_i, Lin_AWS_i, Rain_AWS_i, snow_i : floats
meteorological data
P : float
pressure [Pa] used for snow turbulent heat calculations
Albedo, a_neutral_ice : floats
albedo and turbulent heat flux transfer coefficient (from surface roughness)
dsnow_t0, tsnow_t0, snow_tau_t0
snow depth, temperature and dimensionless age at start of time step before any snow or melt has occurred
ill_angle_rad_i : float
solar illumination angle used to adjust snow albedo
a_neutral_snow : float
snow turbulent heat flux transfer coefficient (based on surface roughness)
option_snow : int
switch to use snow model (1) or not (0)
option_snow_fromAWS : int
switch to use snow depth (1) instead of snow fall (0)
Returns
-------
F_Ts_i, Rn_i, LE_i, H_i, P_flux_i, Qc_i : floats
Energy fluxes [W m-2]
dF_Ts_i, dRn_i, dLE_i, dH_i, dP_flux_i, dQc_i : floats
Derivatives of energy fluxes
dsnow_i : float
Snow depth [mwe] at end of time step
tsnow_i : float
Snow temperature at end of time step
snow_tau_i : float
Non-dimensional snow age at end of time step
"""
# Snow depth [m w.e.]
dsnow_i = dsnow_t0 + snow_i
snow_tau_i = snow_tau_t0
tsnow_i = 0
# First option: Snow depth is based on snow fall, so need to melt snow
if dsnow_i > 0 and option_snow==1 and option_snow_fromAWS == 0:
tsnow_i = (dsnow_t0 * tsnow_t0 + snow_i * Tair_i) / dsnow_i
# Density of air (dry) based on pressure (elevation) and temperature
# used in snow calculations, which has different parameterization of turbulent fluxes
# compared to the debris
density_air = P / (287.058 * Tair_i)
# Albedo
# parameters representing grain growth due to vapor diffusion (r1), additional effect near
# and at freezing point due to melt and refreeze (r2), and the effect of dirt and soot (r3)
snow_r1 = np.exp(5000 * (1 / 273.16 - 1 / tsnow_i))
snow_r2 = np.min([snow_r1**10, 1])
snow_r3 = 0.03 # change to 0.01 if in Antarctica
# change in non-dimensional snow surface age
snow_tau_i += (snow_r1 + snow_r2 + snow_r3) / debris_prms.snow_tau_0 * debris_prms.delta_t
        # new snow effect on snow age
if snow_i > 0.01:
snow_tau_i = 0
elif snow_i > 0:
snow_tau_i = snow_tau_i * (1 - 100 * snow_i)
# snow age
snow_age = snow_tau_i / (1 + snow_tau_i)
# albedo as a function of snow age and band
albedo_vd = (1 - debris_prms.snow_c_v * snow_age) * debris_prms.albedo_vo
albedo_ird = (1 - debris_prms.snow_c_ir * snow_age) * debris_prms.albedo_iro
# increase in albedo based on illumination angle
# illumination angle measured relative to the surface normal
if np.cos(ill_angle_rad_i) < 0.5:
b_ill = 2
f_psi = 1/b_ill * ((1 + b_ill) / (1 + 2 * b_ill * np.cos(ill_angle_rad_i)) - 1)
else:
f_psi = 0
albedo_v = albedo_vd + 0.4 * f_psi * (1 - albedo_vd)
albedo_ir = albedo_ird + 0.4 * f_psi * (1 - albedo_ird)
albedo_snow = np.mean([albedo_v, albedo_ir])
# Adjustments to albedo
# ensure albedo is within bounds (Hock and Holmgren, 2005)
if albedo_snow > 0.9:
albedo_snow = 0.9
elif albedo_snow < 0:
albedo_snow = 0
# if snow less than 0.1 m, then underlying debris influences albedo
if dsnow_i < 0.1:
r_adj = (1 - dsnow_i/0.1)*np.exp(dsnow_i / (2*0.1))
albedo_snow = r_adj * Albedo + (1 - r_adj) * albedo_snow
# Snow Energy Balance
Rn_snow = (Sin_i * (1 - albedo_snow) + debris_prms.emissivity_snow * (Lin_AWS_i -
(debris_prms.stefan_boltzmann * tsnow_i**4)))
H_snow = a_neutral_snow * density_air * debris_prms.cA * u_AWS_i * (Tair_i - tsnow_i)
# Vapor pressure above snow assumed to be saturated
        # Vapor pressure (e, Pa) computed using the Clausius-Clapeyron equation and relative humidity
# 611 is the vapor pressure of ice and liquid water at melting temperature (273.15 K)
eZ_Saturated = 611 * np.exp(debris_prms.Lv / debris_prms.R_const * (1 / 273.15 - 1 / Tair_i))
eZ = RH_AWS_i * eZ_Saturated
# Vapor pressure of snow based on temperature (Colbeck, 1990)
e_snow = debris_prms.eS_snow * np.exp(2838 * (tsnow_i - 273.15) / (0.4619 * tsnow_i * 273.15))
if e_snow > debris_prms.eS_snow:
e_snow = debris_prms.eS_snow
LE_snow = 0.622 * debris_prms.Ls / (debris_prms.Rd * Tair_i) * a_neutral_snow * u_AWS_i * (eZ - e_snow)
Pflux_snow = (Rain_AWS_i * (debris_prms.Lf * debris_prms.density_water + debris_prms.cW *
debris_prms.density_water * (np.max([273.15, Tair_i]) - 273.15)) /
debris_prms.delta_t)
# Assume no flux between the snow and ice (Huss and Holmgren 2005)
Qc_snow_ice = 0
# Net energy available for snow depends on latent heat flux
# if Positive LE: Air > snow vapor pressure (condensation/resublimation)
# energy released and available to melt the snow (include LE in net energy)
if LE_snow > 0:
Fnet_snow = Rn_snow + H_snow + LE_snow + Pflux_snow + Qc_snow_ice
snow_sublimation = 0
# if Negative LE: Air < snow vapor pressure (sublimation/evaporation)
# energy consumed and snow sublimates (do not include LE in net energy)
else:
Fnet_snow = Rn_snow + H_snow + Pflux_snow + Qc_snow_ice
# Snow sublimation [m w.e.]
snow_sublimation = -1 * LE_snow / (debris_prms.density_water * debris_prms.Lv) * debris_prms.delta_t
        # Cold content of snow [W m-2]
Qcc_snow = debris_prms.cSnow * debris_prms.density_water * dsnow_i * (273.15 - tsnow_i) / debris_prms.delta_t
# Max energy spent cooling snowpack based on 1 degree temperature change
Qcc_snow_neg1 = -1 * debris_prms.cSnow * debris_prms.density_water * dsnow_i / debris_prms.delta_t
# if i_step > 39900 and i_step < 40000:
# print('i:', i_step, 'albedo:', np.round(albedo_snow,2), 'dsnow:', np.round(dsnow_i,3),
# 'Rn_snow:', np.round(Rn_snow), 'Tair:', np.round(Tair_i, 1))
# If Fnet_snow is positive and greater than cold content, then energy is going to warm the
# snowpack to melting point and begin melting the snow.
if Fnet_snow > Qcc_snow:
# Snow warmed up to melting temperature
tsnow_i = 273.15
Fnet_snow -= Qcc_snow
elif Fnet_snow < Qcc_snow_neg1:
# Otherwise only changes the temperature in the snowpack and the debris
# limit the change in snow temperature
tsnow_i -= 1
Fnet_snow = 0
else:
# Otherwise only changes the temperature
tsnow_i += Fnet_snow / (debris_prms.cSnow * debris_prms.density_water * dsnow_i) * debris_prms.delta_t
Fnet_snow = 0
# Snow melt [m snow] with remaining energy, if any
snow_melt_energy = Fnet_snow / (debris_prms.density_water * debris_prms.Lf) * debris_prms.delta_t
# Total snow melt
snow_melt = snow_melt_energy + snow_sublimation
# if i_step > 39900 and i_step < 40000:
# print(' snow melt:', np.round(snow_melt,3), 'tsnow:', np.round(tsnow_i,2))
# Snow depth [m w.e.]
dsnow_i -= snow_melt
if dsnow_i < 0:
dsnow_i = 0
if dsnow_i == 0:
snow_tau_i = 0
# Solve for temperature in debris
# Rn, LE, H, and P equal 0
Rn_i = 0
LE_i = 0
H_i = 0
Qc_i = 0
P_flux_i = 0
Qc_snow_i = 0
F_Ts_i = Rn_i + LE_i + H_i + Qc_i + P_flux_i + Qc_snow_i
# Second option: Snow depth is prescribed from AWS, so don't need to melt snow
elif dsnow_i > 0 and option_snow==1 and option_snow_fromAWS == 1:
dsnow_i = snow_i
# Set everything to zero as no melting while there is snow on the surface
Rn_i = 0
LE_i = 0
H_i = 0
Qc_i = 0
P_flux_i = 0
Qc_snow_i = 0
F_Ts_i = 0
else:
# Clean ice glacier Energy Balance (no snow)
        # Vapor pressure (e, Pa) computed using the Clausius-Clapeyron equation and relative humidity
# 611 is the vapor pressure of ice and liquid water at melting temperature (273.15 K)
eS_Saturated = 611
eS = eS_Saturated
eZ_Saturated = 611 * np.exp(-debris_prms.Lv / debris_prms.R_const * (1 / Tair_i - 1 / 273.15))
eZ = RH_AWS_i * eZ_Saturated
LE_i = (0.622 * debris_prms.density_air_0 / debris_prms.P0 * debris_prms.Lv * a_neutral_ice * u_AWS_i
* (eZ -eS))
Rn_i = Sin_i * (1 - Albedo) + debris_prms.emissivity * (Lin_AWS_i - (5.67e-8 * 273.15**4))
H_i = (debris_prms.density_air_0 * (P / debris_prms.P0) * debris_prms.cA * a_neutral_ice * u_AWS_i *
(Tair_i - 273.15))
P_flux_i = debris_prms.density_water * debris_prms.cW * Rain_AWS_i / debris_prms.delta_t * (Tair_i - 273.15)
# Ground heat flux
Qc_i = 0
F_Ts_i = Rn_i + LE_i + H_i + Qc_i + P_flux_i
return F_Ts_i, Rn_i, LE_i, H_i, P_flux_i, Qc_i, dsnow_i, tsnow_i, snow_tau_i
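# --- Illustrative sketch (not part of the original model) ---
# Once the net flux at a melting surface is known, melt follows from dividing by the
# latent heat of fusion, exactly as done for the snow-melt term above
# (snow_melt_energy = Fnet / (density_water * Lf) * delta_t). The constants below are
# nominal values; the model itself takes them from debris_prms.
def _demo_melt_mwe(F_net, delta_t, density_water=1000.0, Lf=3.34e5):
    """Convert a net surface flux [W m-2] acting over delta_t [s] into melt [m w.e.];
    negative fluxes produce no melt."""
    return max(F_net, 0.0) * delta_t / (density_water * Lf)
# e.g. 100 W m-2 over one hour: _demo_melt_mwe(100, 3600) ~ 0.0011 m w.e.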
def main(list_packed_vars):
"""
Model simulation
Parameters
----------
list_packed_vars : list
list of packed variables that enable the use of parallels
Returns
-------
netcdf files of the simulation output (specific output is dependent on the output option)
"""
# Unpack variables
count = list_packed_vars[0]
latlon_list = list_packed_vars[1]
debris_thickness_all = np.array(list_packed_vars[2])
if debug:
print(count, latlon_list)
for nlatlon, latlon in enumerate(latlon_list):
if debug:
print(nlatlon, latlon)
lat_deg = latlon[0]
lon_deg = latlon[1]
# Slope_AWS_rad = 0
# Aspect_AWS_rad = 0
# P_AWS = debris_prms.P0*np.exp(-0.0289644*9.81*debris_prms.Elev_AWS/(8.31447*288.15)) # Pressure at Pyr Station
# ===== Meteorological data =====
        if lat_deg < 0:
            lat_str = 'S-'
        else:
            lat_str = 'N-'
        metdata_fn = debris_prms.metdata_fn_sample.replace('XXXX', str(int(np.abs(lat_deg)*100)) + lat_str +
                                                           str(int(lon_deg*100)) + 'E-')
ds = xr.open_dataset(debris_prms.metdata_fp + metdata_fn)
# Time information
time_pd_all = pd.to_datetime(ds.time.values)
year_all = np.array(time_pd_all.year)
month_all = np.array(time_pd_all.month)
day_all = np.array(time_pd_all.day)
hour_all = np.array(time_pd_all.hour)
minute_all = np.array(time_pd_all.minute)
time_yymmdd_all = [str(year_all[x]) + '-' + str(month_all[x]).zfill(2) + '-' + str(day_all[x]).zfill(2)
for x in np.arange(0,len(time_pd_all))]
# Time Indices
start_idx = time_yymmdd_all.index(debris_prms.start_date)
end_idx = time_yymmdd_all.index(debris_prms.end_date) + 23
# Subsets
time_pd = time_pd_all[start_idx:end_idx+1]
year = year_all[start_idx:end_idx+1]
month = month_all[start_idx:end_idx+1]
day = day_all[start_idx:end_idx+1]
hour = hour_all[start_idx:end_idx+1]
minute = minute_all[start_idx:end_idx+1]
# Elevations
elev_list = []
for elev_cn in debris_prms.elev_cns:
if elev_cn == 'zmean':
elev_list.append(int(np.round(ds['dc_zmean'].values,0)))
elif elev_cn == 'zstdlow':
elev_list.append(int(np.round(ds['dc_zmean'].values - ds['dc_zstd'].values,0)))
elif elev_cn == 'zstdhigh':
elev_list.append(int(np.round(ds['dc_zmean'].values + ds['dc_zstd'].values,0)))
# Create output file
output_ds_all, encoding = create_xrdataset(debris_thickness_all=debris_thickness_all, lat_deg=lat_deg,
lon_deg=lon_deg, time_values=time_pd, elev_values=elev_list)
# Load meteorological data
# Air temperature
Tair_AWS = ds['t2m'][start_idx:end_idx+1].values
# Relative humidity
RH_AWS = ds['rh'][start_idx:end_idx+1].values / 100
RH_AWS[RH_AWS<0] = 0
RH_AWS[RH_AWS>1] = 1
# Wind speed
u_AWS_x = ds['u10'][start_idx:end_idx+1].values
u_AWS_y = ds['v10'][start_idx:end_idx+1].values
u_AWS_raw = (u_AWS_x**2 + u_AWS_y**2)**0.5
# Total Precipitation
Rain_AWS = ds['tp'][start_idx:end_idx+1].values
# Incoming shortwave radiation
Sin_AWS = ds['ssrd'][start_idx:end_idx+1].values / 3600
Sin_AWS[Sin_AWS < 0.1] = 0
# Incoming longwave radiation
Lin_AWS = ds['strd'][start_idx:end_idx+1].values / 3600
# Elevation
Elev_AWS = ds['z'].values
# Assume snow not provided by AWS
Snow_AWS = None
# Assume no adjustments for slope/aspect from AWS
Sin_timeseries = Sin_AWS
# Lapse rate (monthly)
if debris_prms.option_lr_fromdata == 1:
ds_lr = xr.open_dataset(debris_prms.metdata_lr_fullfn)
            lat_idx = np.abs(lat_deg - ds_lr['latitude'].values).argmin()
            lon_idx = np.abs(lon_deg - ds_lr['longitude'].values).argmin()
            lr_monthly_all = ds_lr['lapserate'][:,lat_idx,lon_idx].values
            lr_time_pd_all = pd.to_datetime(ds_lr.time.values)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 15:50:20 2019
workflow for ZWD and PW retrieval after python copy_gipsyx_post_from_geo.py:
1)save_PPP_field_unselected_data_and_errors(field='ZWD')
2)select_PPP_field_thresh_and_combine_save_all(field='ZWD')
3)use mean_ZWD_over_sound_time_and_fit_tstm to obtain the mda (model dataarray)
3*) can't use produce_kappa_ml_with_cats for hour on 5 mins data, dahhh!
can do that with dayofyear, month, season (need to implement it first)
4)save_GNSS_PW_israeli_stations using mda (e.g., season) from 3
5) do homogenization using Homogenization_R.py and run homogenize_pw_dataset
6) for hydro analysis and more run produce_all_GNSS_PW_anomalies
@author: shlomi
"""
import pandas as pd
import numpy as np
from PW_paths import work_yuval
from PW_paths import work_path
from PW_paths import geo_path
from pathlib import Path
from sklearn.linear_model import LinearRegression
from scipy import stats
hydro_path = work_yuval / 'hydro'
garner_path = work_yuval / 'garner'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
sound_path = work_yuval / 'sounding'
climate_path = work_yuval / 'climate'
dem_path = work_yuval / 'AW3D30'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
tela_zwd = work_yuval / 'gipsyx_results/tela_newocean/TELA_PPP_1996-2019.nc'
jslm_zwd = work_yuval / 'gipsyx_results/jslm_newocean/JSLM_PPP_2001-2019.nc'
alon_zwd = work_yuval / 'gipsyx_results/alon_newocean/ALON_PPP_2005-2019.nc'
tela_zwd_aligned = work_yuval / 'tela_zwd_aligned_with_physical_bet_dagan.nc'
alon_zwd_aligned = work_yuval / 'ALON_zwd_aligned_with_physical_bet_dagan.nc'
jslm_zwd_aligned = work_yuval / 'JSLM_zwd_aligned_with_physical_bet_dagan.nc'
tela_ims = ims_path / '10mins/TEL-AVIV-COAST_178_TD_10mins_filled.nc'
alon_ims = ims_path / '10mins/ASHQELON-PORT_208_TD_10mins_filled.nc'
jslm_ims = ims_path / '10mins/JERUSALEM-CENTRE_23_TD_10mins_filled.nc'
station_on_geo = geo_path / 'Work_Files/PW_yuval/GNSS_stations'
era5_path = work_yuval / 'ERA5'
PW_stations_path = work_yuval / '1minute'
# stations = pd.read_csv('All_gps_stations.txt', header=0, delim_whitespace=True,
# index_col='name')
logs_path = geo_path / 'Python_Projects/PW_from_GPS/log_files'
GNSS = work_yuval / 'GNSS_stations'
cwd = Path().cwd()
gnss_sound_stations_dict = {'acor': '08001', 'mall': '08302'}
# TODO: kappa_ml_with_cats yields smaller k using cats not None, check it...
# TODO: then assemble PW for all the stations.
class LinearRegression_with_stats(LinearRegression):
"""
LinearRegression class after sklearn's, but calculate t-statistics
and p-values for model coefficients (betas).
Additional attributes available after .fit()
    are `se`, `betasTStat` and `betasPValue`, one entry per coefficient
    (i.e., per column of X).
    This class was written to set the intercept to 0 by default (since the intercept is
    usually included as a column in X), but that override is commented out in __init__,
    so sklearn's default fit_intercept=True currently applies.
"""
def __init__(self, *args, **kwargs):
# if not "fit_intercept" in kwargs:
# kwargs['fit_intercept'] = False
super().__init__(*args,**kwargs)
    def fit(self, X, y=None, verbose=True, **fit_params):
        """ A wrapper around sklearn's fitting function.
        Improved: attaches the standard errors (`se`), t-statistics (`betasTStat`)
        and p-values (`betasPValue`) of the coefficients to the instance.
        Parameters
        ----------
        X : xarray DataArray, Dataset or other array-like
            The training input samples.
        y : xarray DataArray, Dataset or other array-like
            The target values.
        Returns
        -------
        Returns self.
        """
        from scipy import linalg
self = super().fit(X, y, **fit_params)
n, k = X.shape
yHat = np.matrix(self.predict(X)).T
        # Change X and y into numpy matrices. x also has a column of ones added to it.
x = np.hstack((np.ones((n,1)),np.matrix(X)))
y = np.matrix(y).T
# Degrees of freedom.
df = float(n-k-1)
# Sample variance.
sse = np.sum(np.square(yHat - y),axis=0)
self.sampleVariance = sse/df
# Sample variance for x.
self.sampleVarianceX = x.T*x
# Covariance Matrix = [(s^2)(X'X)^-1]^0.5. (sqrtm = matrix square root. ugly)
self.covarianceMatrix = linalg.sqrtm(self.sampleVariance[0,0]*self.sampleVarianceX.I)
        # Standard errors of the coefficients: the diagonal elements of the covariance matrix.
self.se = self.covarianceMatrix.diagonal()[1:]
# T statistic for each beta.
self.betasTStat = np.zeros(len(self.se))
for i in range(len(self.se)):
self.betasTStat[i] = self.coef_[i]/self.se[i]
# P-value for each beta. This is a two sided t-test, since the betas can be
# positive or negative.
self.betasPValue = 1 - stats.t.cdf(abs(self.betasTStat),df)
return self
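# A minimal synthetic usage sketch of LinearRegression_with_stats (not part of the
# workflow above): fit y = 2*x + noise and read the per-coefficient standard errors,
# t-statistics and p-values attached by .fit().
def _demo_linear_regression_with_stats():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 1)
    y = 2.0 * X[:, 0] + 0.1 * rng.randn(100)
    model = LinearRegression_with_stats()
    model.fit(X, y)
    # model.coef_ should be close to 2; betasPValue should be ~0 for a strong predictor
    return model.coef_, model.se, model.betasTStat, model.betasPValue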
def compare_different_cats_bet_dagan_tela():
from aux_gps import error_mean_rmse
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=None)
ds_hour, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['hour'])
ds_season, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['season'])
ds_hour_season, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['hour', 'season'])
ds = ds.dropna('sound_time')
ds_hour = ds_hour.dropna('sound_time')
ds_season = ds_season.dropna('sound_time')
ds_hour_season = ds_hour_season.dropna('sound_time')
mean_none, rmse_none = error_mean_rmse(ds['tpw_bet_dagan'], ds['tela_pw'])
mean_hour, rmse_hour = error_mean_rmse(
ds_hour['tpw_bet_dagan'], ds_hour['tela_pw'])
mean_season, rmse_season = error_mean_rmse(
ds_season['tpw_bet_dagan'], ds_season['tela_pw'])
mean_hour_season, rmse_hour_season = error_mean_rmse(
ds_hour_season['tpw_bet_dagan'], ds_hour_season['tela_pw'])
hour_mean_per = 100 * (abs(mean_none) - abs(mean_hour)) / abs(mean_none)
hour_rmse_per = 100 * (abs(rmse_none) - abs(rmse_hour)) / abs(rmse_none)
season_mean_per = 100 * (abs(mean_none) - abs(mean_season)) / abs(mean_none)
season_rmse_per = 100 * (abs(rmse_none) - abs(rmse_season)) / abs(rmse_none)
hour_season_mean_per = 100 * (abs(mean_none) - abs(mean_hour_season)) / abs(mean_none)
hour_season_rmse_per = 100 * (abs(rmse_none) - abs(rmse_hour_season)) / abs(rmse_none)
print(
'whole data mean: {:.2f} and rmse: {:.2f}'.format(
mean_none,
rmse_none))
print(
'hour data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_hour, rmse_hour, hour_mean_per, hour_rmse_per))
print(
'season data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_season, rmse_season, season_mean_per, season_rmse_per))
print(
'hour and season data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_hour_season, rmse_hour_season, hour_season_mean_per, hour_season_rmse_per))
return
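# Small helper mirroring the percent-improvement arithmetic printed above (not part of
# the original workflow): 100 * (|reference| - |candidate|) / |reference|.
def _demo_percent_improvement(reference, candidate):
    """Percent reduction of |candidate| relative to |reference| (e.g., RMSE improvement)."""
    return 100 * (abs(reference) - abs(candidate)) / abs(reference)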
def PW_trend_analysis(path=work_yuval, anom=False, station='tela'):
import xarray as xr
pw = xr.open_dataset(path / 'GNSS_daily_PW.nc')[station]
if anom:
pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
pw_lr = ML_fit_model_to_tmseries(pw, modelname='LR', plot=False, verbose=True)
pw_tsen = ML_fit_model_to_tmseries(pw, modelname='TSEN', plot=False, verbose=True)
return pw_tsen
def produce_gnss_pw_from_uerra(era5_path=era5_path,
glob_str='UERRA_TCWV_*.nc',
pw_path=work_yuval, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
udf = add_UERRA_xy_to_israeli_gps_coords(pw_path, era5_path)
files = path_glob(era5_path, glob_str)
uerra_list = [xr.open_dataset(file) for file in files]
ds_attrs = uerra_list[0].attrs
ds_list = []
for i, uerra in enumerate(uerra_list):
        print('processing {}'.format(files[i].as_posix().split('/')[-1]))
st_list = []
for station in udf.index:
y = udf.loc[station, 'y']
x = udf.loc[station, 'x']
uerra_st = uerra['tciwv'].isel(y=y, x=x).reset_coords(drop=True)
uerra_st.name = station
uerra_st.attrs = uerra['tciwv'].attrs
uerra_st.attrs['lon'] = udf.loc[station, 'lon']
uerra_st.attrs['lat'] = udf.loc[station, 'lat']
st_list.append(uerra_st)
ds_st = xr.merge(st_list)
ds_list.append(ds_st)
ds = xr.concat(ds_list, 'time')
ds = ds.sortby('time')
ds.attrs = ds_attrs
ds_monthly = ds.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
if savepath is not None:
filename = 'GNSS_uerra_4xdaily_PW.nc'
save_ncfile(ds, savepath, filename)
filename = 'GNSS_uerra_monthly_PW.nc'
save_ncfile(ds_monthly, savepath, filename)
return ds
def produce_PWV_flux_from_ERA5_UVQ(
path=era5_path,
savepath=None,
pw_path=work_yuval, return_magnitude=False):
import xarray as xr
from aux_gps import calculate_pressure_integral
from aux_gps import calculate_g
from aux_gps import save_ncfile
import numpy as np
ds = xr.load_dataset(era5_path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
g = calculate_g(ds['latitude']).mean().item()
qu = calculate_pressure_integral(ds['q'] * ds['u'])
qv = calculate_pressure_integral(ds['q'] * ds['v'])
qu.name = 'qu'
qv.name = 'qv'
# convert to mm/sec units
qu = 100 * qu / (g * 1000)
qv = 100 * qv / (g * 1000)
# add attrs:
qu.attrs['units'] = 'mm/sec'
qv.attrs['units'] = 'mm/sec'
qu_gnss = produce_era5_field_at_gnss_coords(
qu, savepath=None, pw_path=pw_path)
qv_gnss = produce_era5_field_at_gnss_coords(
qv, savepath=None, pw_path=pw_path)
if return_magnitude:
qflux = np.sqrt(qu_gnss**2 + qv_gnss**2)
qflux.attrs['units'] = 'mm/sec'
return qflux
else:
return qu_gnss, qv_gnss
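# Rough standalone sketch of the vertical integral behind produce_PWV_flux_from_ERA5_UVQ
# (not part of the workflow): the zonal moisture flux is (1/g) * integral of q*u dp.
# calculate_pressure_integral lives in aux_gps and is not shown here, so a plain
# trapezoidal sum over pressure levels (assumed in hPa) is used instead; the scaling to
# mm/sec is handled in the function above.
def _demo_moisture_flux_kg_m_s(q, u, plevs_hpa, g=9.81):
    """Vertically integrated zonal moisture flux [kg m-1 s-1] from profiles of
    specific humidity q [kg/kg] and zonal wind u [m/s] on pressure levels [hPa]."""
    p_pa = np.asarray(plevs_hpa, dtype=float) * 100.0
    qu = np.asarray(q, dtype=float) * np.asarray(u, dtype=float)
    dp = np.diff(p_pa)
    return np.sum(0.5 * (qu[1:] + qu[:-1]) * dp) / g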
def produce_era5_field_at_gnss_coords(era5_da, savepath=None,
pw_path=work_yuval):
import xarray as xr
from aux_gps import save_ncfile
print('reading ERA5 {} field.'.format(era5_da.name))
gps = produce_geo_gnss_solved_stations(plot=False)
era5_pw_list = []
for station in gps.index:
slat = gps.loc[station, 'lat']
slon = gps.loc[station, 'lon']
da = era5_da.sel(latitude=slat, longitude=slon, method='nearest')
da.name = station
da.attrs['era5_lat'] = da.latitude.values.item()
da.attrs['era5_lon'] = da.longitude.values.item()
da = da.reset_coords(drop=True)
era5_pw_list.append(da)
ds = xr.merge(era5_pw_list)
if savepath is not None:
name = era5_da.name
yrmin = era5_da['time'].dt.year.min().item()
yrmax = era5_da['time'].dt.year.max().item()
filename = 'GNSS_ERA5_{}_{}-{}.nc'.format(name, yrmin, yrmax)
save_ncfile(ds, savepath, filename)
return ds
def produce_gnss_pw_from_era5(era5_path=era5_path,
glob_str='era5_TCWV_israel*.nc',
pw_path=work_yuval, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
filepath = path_glob(era5_path, glob_str)[0]
print('opening ERA5 file {}'.format(filepath.as_posix().split('/')[-1]))
era5_pw = xr.open_dataarray(filepath)
era5_pw = era5_pw.sortby('time')
gps = produce_geo_gnss_solved_stations(plot=False)
era5_pw_list = []
for station in gps.index:
slat = gps.loc[station, 'lat']
slon = gps.loc[station, 'lon']
da = era5_pw.sel(lat=slat, lon=slon, method='nearest')
da.name = station
da.attrs['era5_lat'] = da.lat.values.item()
da.attrs['era5_lon'] = da.lon.values.item()
da = da.reset_coords(drop=True)
era5_pw_list.append(da)
ds_hourly = xr.merge(era5_pw_list)
ds_monthly = ds_hourly.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
if savepath is not None:
filename = 'GNSS_era5_hourly_PW.nc'
save_ncfile(ds_hourly, savepath, filename)
filename = 'GNSS_era5_monthly_PW.nc'
save_ncfile(ds_monthly, savepath, filename)
return ds_hourly
def plug_in_approx_loc_gnss_stations(log_path=logs_path, file_path=cwd):
from aux_gps import path_glob
import pandas as pd
def plug_loc_to_log_file(logfile, loc):
def replace_field(content_list, string, replacment):
pos = [(i, x) for i, x in enumerate(content_list)
if string in x][0][0]
con = content_list[pos].split(':')
con[-1] = ' {}'.format(replacment)
con = ':'.join(con)
content_list[pos] = con
return content_list
with open(logfile) as f:
content = f.read().splitlines()
repl = [
'X coordinate (m)',
'Y coordinate (m)',
'Z coordinate (m)',
'Latitude (deg)',
'Longitude (deg)',
'Elevation (m)']
location = [loc['X'], loc['Y'], loc['Z'], '+' +
str(loc['lat']), '+' + str(loc['lon']), loc['alt']]
for rep, loca in list(zip(repl, location)):
try:
content = replace_field(content, rep, loca)
except IndexError:
                print('did not find {} field...'.format(rep))
pass
with open(logfile, 'w') as f:
for item in content:
f.write('{}\n'.format(item))
print('writing {}'.format(logfile))
return
# load gnss accurate loc:
acc_loc_df = pd.read_csv(file_path / 'israeli_gnss_coords.txt',
delim_whitespace=True)
log_files = path_glob(log_path, '*updated_by_shlomi*.log')
for logfile in log_files:
st_log = logfile.as_posix().split('/')[-1].split('_')[0]
try:
loc = acc_loc_df.loc[st_log, :]
except KeyError:
print('station {} not found in accurate location df, skipping'.format(st_log))
continue
plug_loc_to_log_file(logfile, loc)
print('Done!')
return
def build_df_lat_lon_alt_gnss_stations(gnss_path=GNSS, savepath=None):
from aux_gps import path_glob
import pandas as pd
import pyproj
from pathlib import Path
stations_in_gnss = [x.as_posix().split('/')[-1]
for x in path_glob(GNSS, '*')]
dss = [
load_gipsyx_results(
x,
sample_rate='MS',
plot_fields=None) for x in stations_in_gnss]
# stations_not_found = [x for x in dss if isinstance(x, str)]
# [stations_in_gnss.remove(x) for x in stations_in_gnss if x is None]
dss = [x for x in dss if not isinstance(x, str)]
dss = [x for x in dss if x is not None]
lats = [x.dropna('time').lat[0].values.item() for x in dss]
lons = [x.dropna('time').lon[0].values.item() for x in dss]
alts = [x.dropna('time').alt[0].values.item() for x in dss]
df = pd.DataFrame(lats)
df.index = [x.attrs['station'].lower() for x in dss]
df['lon'] = lons
df['alt'] = alts
df.columns = ['lat', 'lon', 'alt']
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
X, Y, Z = pyproj.transform(lla, ecef, df['lon'].values, df['lat'].values,
df['alt'].values, radians=False)
df['X'] = X
df['Y'] = Y
df['Z'] = Z
# read station names from log files:
stations_approx = pd.read_fwf(Path().cwd()/'stations_approx_loc.txt',
delim_whitespace=False, skiprows=1, header=None)
stations_approx.columns=['index','X','Y','Z','name', 'extra']
stations_approx['name'] = stations_approx['name'].fillna('') +' ' + stations_approx['extra'].fillna('')
stations_approx.drop('extra', axis=1, inplace=True)
stations_approx = stations_approx.set_index('index')
df['name'] = stations_approx['name']
df.sort_index(inplace=True)
if savepath is not None:
filename = 'israeli_gnss_coords.txt'
df.to_csv(savepath/filename, sep=' ')
return df
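# Single-point version of the geodetic-to-ECEF conversion used above (not part of the
# workflow), handy for checking one station against its log-file coordinates. It mirrors
# the legacy pyproj.Proj / pyproj.transform calls in build_df_lat_lon_alt_gnss_stations;
# newer pyproj releases prefer the Transformer API.
def _demo_lla_to_ecef(lat_deg, lon_deg, alt_m):
    import pyproj
    ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
    lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
    X, Y, Z = pyproj.transform(lla, ecef, lon_deg, lat_deg, alt_m, radians=False)
    return X, Y, Z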
def produce_homogeniety_results_xr(ds, alpha=0.05, test='snht', sim=20000):
import pyhomogeneity as hg
import xarray as xr
from aux_gps import homogeneity_test_xr
hg_tests_dict = {
'snht': hg.snht_test,
'pett': hg.pettitt_test,
'b_like': hg.buishand_likelihood_ratio_test,
'b_u': hg.buishand_u_test,
'b_q': hg.buishand_q_test,
'b_range': hg.buishand_range_test}
if test == 'all':
tests = [x for x in hg_tests_dict.keys()]
ds_list = []
for t in tests:
print('running {} test...'.format(t))
rds = ds.map(homogeneity_test_xr, hg_test_func=hg_tests_dict[t],
alpha=alpha, sim=sim, verbose=False)
rds = rds.to_array('station').to_dataset('results')
ds_list.append(rds)
rds = xr.concat(ds_list, 'test')
rds['test'] = tests
rds.attrs['alpha'] = alpha
rds.attrs['sim'] = sim
else:
rds = ds.map(homogeneity_test_xr, hg_test_func=hg_tests_dict[test],
alpha=alpha, sim=sim, verbose=False)
rds = rds.to_array('station').to_dataset('results')
rds.attrs['alpha'] = alpha
rds.attrs['sim'] = sim
# df=rds.to_array('st').to_dataset('results').to_dataframe()
print('Done!')
return rds
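# Synthetic sanity check for produce_homogeniety_results_xr (not part of the workflow):
# build a dataset with one artificial mean shift and run a single change-point test on it.
# This assumes aux_gps.homogeneity_test_xr accepts the DataArrays exactly as in the calls
# above; sim is kept small so the resampling finishes quickly.
def _demo_homogeneity_on_synthetic_series():
    import xarray as xr
    time = pd.date_range('2000-01-01', periods=240, freq='MS')
    values = np.random.randn(240)
    values[120:] += 1.5  # artificial break point halfway through
    ds = xr.Dataset({'fake_station': ('time', values)}, coords={'time': time})
    return produce_homogeniety_results_xr(ds, alpha=0.05, test='pett', sim=1000)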
def run_error_analysis(station='tela', task='edit30hr'):
station_on_geo = geo_path / 'Work_Files/PW_yuval/GNSS_stations'
if task == 'edit30hr':
path = station_on_geo / station / 'rinex/30hr'
err, df = gipsyx_runs_error_analysis(path, glob_str='*.dr.gz')
elif task == 'run':
path = station_on_geo / station / 'rinex/30hr/results'
err, df = gipsyx_runs_error_analysis(path, glob_str='*.tdp')
return err, df
def gipsyx_runs_error_analysis(path, glob_str='*.tdp'):
from collections import Counter
from aux_gps import get_timedate_and_station_code_from_rinex
from aux_gps import path_glob
import pandas as pd
import logging
def find_errors(content_list, name):
keys = [x for x in content_list if 'KeyError' in x]
vals = [x for x in content_list if 'ValueError' in x]
excpt = [x for x in content_list if 'Exception' in x]
err = [x for x in content_list if 'Error' in x]
trouble = [x for x in content_list if 'Trouble' in x]
problem = [x for x in content_list if 'Problem' in x]
fatal = [x for x in content_list if 'FATAL' in x]
timed = [x for x in content_list if 'Timed' in x]
errors = keys + vals + excpt + err + trouble + problem + fatal + timed
if not errors:
dt, _ = get_timedate_and_station_code_from_rinex(name)
logger.warning('found new error on {} ({})'.format(name, dt.strftime('%Y-%m-%d')))
return errors
logger = logging.getLogger('gipsyx_post_proccesser')
rfns = []
files = path_glob(path, glob_str, True)
for file in files:
        # first get all the rinex filenames that gipsyx ran successfully:
rfn = file.as_posix().split('/')[-1][0:12]
rfns.append(rfn)
if files:
logger.info('running error analysis for station {}'.format(rfn[0:4].upper()))
all_errors = []
errors = []
dates = []
rinex = []
files = path_glob(path, '*.err')
for file in files:
rfn = file.as_posix().split('/')[-1][0:12]
        # now, skip the error files for which a tdp file was also produced,
        # i.e., the gipsyx run was successful:
if rfn in rfns:
continue
else:
dt, _ = get_timedate_and_station_code_from_rinex(rfn)
dates.append(dt)
rinex.append(rfn)
with open(file) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at
# the end of each line
content = [x.strip() for x in content]
all_errors.append(content)
errors.append(find_errors(content, rfn))
er = [','.join(x) for x in all_errors]
df = pd.DataFrame(data=rinex, index=dates, columns=['rinex'])
df['error'] = er
df = df.sort_index()
total = len(rfns) + len(df)
good = len(rfns)
bad = len(df)
    logger.info('total files: {}, successful runs: {}, erroneous runs: {}'.format(
total, good, bad))
logger.info('success percent: {0:.1f}%'.format(100.0 * good / total))
logger.info('error percent: {0:.1f}%'.format(100.0 * bad / total))
# now count the similar errors and sort:
flat_list = [item for sublist in errors for item in sublist]
counted_errors = Counter(flat_list)
errors_sorted = sorted(counted_errors.items(), key=lambda x: x[1],
reverse=True)
return errors_sorted, df
def compare_gipsyx_soundings(sound_path=sound_path, gps_station='acor',
times=['1996', '2019'], var='pw'):
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
import xarray as xr
from aux_gps import path_glob
# sns.set_style('whitegrid')
# ds = mean_zwd_over_sound_time(
# physical_file, ims_path=ims_path, gps_station='tela',
# times=times)
sound_station = gnss_sound_stations_dict.get(gps_station)
gnss = load_gipsyx_results(plot_fields=None, station=gps_station)
sound_file = path_glob(sound_path, 'station_{}_soundings_ts_tm_tpw*.nc'.format(sound_station))[0]
sds = xr.open_dataset(sound_file)
time_dim = list(set(sds.dims))[0]
sds = sds.rename({time_dim: 'time'})
sds[gps_station] = gnss.WetZ
if var == 'zwd':
k = kappa(sds['Tm'], Tm_input=True)
sds['sound'] = sds.Tpw / k
sds[gps_station] = gnss.WetZ
elif var == 'pw':
linear_model = ml_models_T_from_sounding(times=times,
station=sound_station,
plot=False, models=['LR'])
linear_model = linear_model.sel(name='LR').values.item()
k = kappa_ml(sds['Ts'] - 273.15, model=linear_model, no_error=True)
sds[gps_station] = sds[gps_station] * k
sds['sound'] = sds.Tpw
sds = sds.dropna('time')
sds = sds.sel(time=slice(*times))
df = sds[['sound', gps_station]].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
[x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
df.columns = ['{} soundings'.format(sound_station), '{} GNSS station'.format(gps_station)]
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
df_r = df.iloc[:, 0] - df.iloc[:, 1]
df_r.columns = ['Residual distribution']
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if var == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif var == 'pw':
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
# sonde_change_x = pd.to_datetime('2013-08-20')
# axes[1].axvline(sonde_change_x, color='red')
# axes[1].annotate(
# 'changed sonde type from VIZ MK-II to PTU GPS',
# (mdates.date2num(sonde_change_x),
# 10),
# xytext=(
# 15,
# 15),
# textcoords='offset points',
# arrowprops=dict(
# arrowstyle='fancy',
# color='red'),
# color='red')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return sds
def produce_zwd_from_sounding_and_compare_to_gps(phys_sound_file=phys_soundings,
zwd_file=tela_zwd_aligned,
tm=None, plot=True):
"""compare zwd from any gps station (that first has to be aligned to
Bet_dagan station) to that of Bet-Dagan radiosonde station using tm from
either bet dagan or user inserted. by default, using zwd from pw by
inversing Bevis 1992 et al. formula"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.dates as mdates
station = zwd_file.as_posix().split('/')[-1].split('_')[0]
zwd_and_tpw = xr.open_dataset(zwd_file)
tpw = zwd_and_tpw['Tpw']
pds = get_ts_tm_from_physical(phys_sound_file, plot=False)
if tm is None:
k = kappa(pds['tm'], Tm_input=True)
else:
k = kappa(tm, Tm_input=True)
zwd_sound = tpw / k
zwd_and_tpw['WetZ_from_bet_dagan'] = zwd_sound
radio = zwd_and_tpw['WetZ_from_bet_dagan']
gps = zwd_and_tpw['{}_WetZ'.format(station)]
gps.name = ['WetZ_from_TELA']
if plot:
# sns.set_style("whitegrid")
df = radio.to_dataframe()
df[gps.name] = gps.to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
[x.set_xlim([pd.to_datetime('2007-12-31'), pd.to_datetime('2019')]) for x in axes]
# radio.plot.line(marker='.', linewidth=0., ax=axes[0])
sns.scatterplot(data=df, s=20, ax=axes[0], style='x', linewidth=0, alpha=0.8)
# gps.plot.line(marker='.', linewidth=0., ax=axes[0])
#sns.scatterplot(data=df, y= 'tela_WetZ', s=10, ax=axes[0])
# axes[0].legend('radiosonde', '{}_gnss_site'.format(station))
df_r = df.iloc[:, 0] - df.iloc[:, 1]
df_r.columns = ['Residuals']
# (radio - gps).plot.line(marker='.', linewidth=0., ax=axes[1])
sns.scatterplot(data=df_r, color = 'k', s=20, ax=axes[1], linewidth=0, alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
        axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS station')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate('changed sonde type from VIZ MK-II to PTU GPS', (mdates.date2num(sonde_change_x), 15), xytext=(15, 15),
textcoords='offset points', arrowprops=dict(arrowstyle='fancy', color='red'), color='red')
# axes[1].set_aspect(3)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
# plt.figure()
# (radio - gps).plot.hist(bins=100)
return zwd_and_tpw
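# Minimal sketch of the ZWD -> PW conversion used in the comparison above (not part of
# the workflow): kappa (defined elsewhere in this module) maps the water-vapour weighted
# mean temperature Tm to a dimensionless proportionality factor, and PW = kappa * ZWD
# (the radiosonde ZWD above is obtained by the inverse, tpw / k).
def _demo_pw_from_zwd(zwd, tm):
    """Convert zenith wet delay to precipitable water for a given Tm [K]."""
    k = kappa(tm, Tm_input=True)
    return k * zwd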
def fit_ts_tm_produce_ipw_and_compare_TELA(phys_sound_file=phys_soundings,
zwd_file=tela_zwd_aligned,
IMS_file=None,
sound_path=sound_path,
categories=None, model='LR',
times=['2005', '2019'],
**compare_kwargs):
"""categories can be :'bevis', None, 'season' and/or 'hour'. None means
whole dataset ts-tm.
models can be 'LR' or 'TSEN'. compare_kwargs is for
compare_to_sounding2 i.e., times, season, hour, title"""
import xarray as xr
print(compare_kwargs)
if categories == 'bevis':
results = None
compare_kwargs.update({'title': None})
else:
results = ml_models_T_from_sounding(sound_path, categories, model,
physical_file=phys_sound_file,
times=times)
if categories is None:
compare_kwargs.update({'title': 'whole'})
elif categories is not None and categories != 'bevis':
if isinstance(categories, str):
compare_kwargs.update({'title': [categories][0]})
elif isinstance(categories, list):
compare_kwargs.update({'title': 'hour_season'})
zwd_and_tpw = xr.open_dataset(zwd_file)
if times is not None:
zwd_and_tpw = zwd_and_tpw.sel(time=slice(*times))
station = zwd_file.as_posix().split('/')[-1].split('_')[0]
tpw = zwd_and_tpw['Tpw']
if IMS_file is None:
T = xr.open_dataset(ims_path / 'GNSS_5mins_TD_ALL_1996_2019.nc')
T = T['tela']
else:
# load the 10 mins temperature data from IMS:
T = xr.open_dataset(IMS_file)
T = T.to_array(name='t').squeeze(drop=True)
zwd_and_tpw = zwd_and_tpw.rename({'{}_WetZ'.format(
station): 'WetZ', '{}_WetZ_error'.format(station): 'WetZ_error'})
zwd = zwd_and_tpw[['WetZ', 'WetZ_error']]
zwd.attrs['station'] = station
pw_gps = produce_single_station_IPW(zwd, T, mda=results, model_name=model)
compare_to_sounding2(pw_gps['PW'], tpw, station=station, **compare_kwargs)
return pw_gps, tpw
def mean_ZWD_over_sound_time_and_fit_tstm(path=work_yuval,
sound_path=sound_path,
data_type='phys',
ims_path=ims_path,
gps_station='tela',
times=['2007', '2019'], plot=False,
cats=None,
savepath=None):
import xarray as xr
import joblib
from aux_gps import multi_time_coord_slice
from aux_gps import path_glob
from aux_gps import xr_reindex_with_date_range
from sounding_procedures import load_field_from_radiosonde
from sounding_procedures import get_field_from_radiosonde
"""mean the WetZ over the gps station soundings datetimes to get a more
accurate realistic measurement comparison to soundings"""
# tpw = load_field_from_radiosonde(path=sound_path, field='PW', data_type=data_type,
# reduce='max',dim='Height', plot=False)
min_time = get_field_from_radiosonde(path=sound_path, field='min_time', data_type='phys',
reduce=None, plot=False)
max_time = get_field_from_radiosonde(path=sound_path, field='max_time', data_type='phys',
reduce=None, plot=False)
sound_time = get_field_from_radiosonde(path=sound_path, field='sound_time', data_type='phys',
reduce=None, plot=False)
min_time = min_time.dropna('sound_time').values
max_time = max_time.dropna('sound_time').values
# load the zenith wet delay for the GPS (e.g., TELA) station:
file = path_glob(path, 'ZWD_thresh_*.nc')[0]
zwd = xr.open_dataset(file)[gps_station]
zwd_error = xr.open_dataset(file)[gps_station + '_error']
freq = pd.infer_freq(zwd.time.values)
if not freq:
zwd = xr_reindex_with_date_range(zwd)
zwd_error = xr_reindex_with_date_range(zwd_error)
freq = pd.infer_freq(zwd.time.values)
min_time = zwd.time.sel(time=min_time, method='nearest').values
max_time = zwd.time.sel(time=max_time, method='nearest').values
da_group = multi_time_coord_slice(min_time, max_time, freq=freq,
time_dim='time', name='sound_time')
zwd[da_group.name] = da_group
zwd_error[da_group.name] = da_group
ds = zwd.groupby(zwd[da_group.name]).mean(
'time').to_dataset(name='{}'.format(gps_station))
ds['{}_std'.format(gps_station)] = zwd.groupby(
zwd[da_group.name]).std('time')
ds['{}_error'.format(gps_station)] = zwd_error.groupby(
zwd[da_group.name]).mean('time')
ds['sound_time'] = sound_time.dropna('sound_time')
# ds['tpw_bet_dagan'] = tpw
wetz = ds['{}'.format(gps_station)]
wetz_error = ds['{}_error'.format(gps_station)]
# do the same for surface temperature:
file = path_glob(ims_path, 'GNSS_5mins_TD_ALL_*.nc')[0]
td = xr.open_dataset(file)[gps_station].to_dataset(name='ts')
min_time = td.time.sel(time=min_time, method='nearest').values
max_time = td.time.sel(time=max_time, method='nearest').values
freq = pd.infer_freq(td.time.values)
da_group = multi_time_coord_slice(min_time, max_time, freq=freq,
time_dim='time', name='sound_time')
td[da_group.name] = da_group
ts_sound = td.ts.groupby(td[da_group.name]).mean('time')
ts_sound['sound_time'] = sound_time.dropna('sound_time')
ds['{}_ts'.format(gps_station)] = ts_sound
ts_sound = ts_sound.rename({'sound_time': 'time'})
# prepare ts-tm data:
tm = get_field_from_radiosonde(path=sound_path, field='Tm', data_type=data_type,
reduce=None, dim='Height', plot=False)
ts = get_field_from_radiosonde(path=sound_path, field='Ts', data_type=data_type,
reduce=None, dim='Height', plot=False)
tstm = xr.Dataset()
tstm['Tm'] = tm
tstm['Ts'] = ts
tstm = tstm.rename({'sound_time': 'time'})
# select a model:
mda = ml_models_T_from_sounding(categories=cats, models=['LR', 'TSEN'],
physical_file=tstm, plot=plot,
times=times)
# compute the kappa function and multiply by ZWD to get PW(+error):
k, dk = produce_kappa_ml_with_cats(ts_sound, mda=mda, model_name='TSEN')
ds['{}_pw'.format(gps_station)] = k.rename({'time': 'sound_time'}) * wetz
ds['{}_pw_error'.format(gps_station)] = np.sqrt(
wetz_error**2.0 + dk**2.0)
# divide by kappa calculated from bet_dagan ts to get bet_dagan zwd:
k = kappa(tm, Tm_input=True)
# ds['zwd_bet_dagan'] = ds['tpw_bet_dagan'] / k
if savepath is not None:
m = mda.to_dataset('name')
for model in m:
joblib.dump(m[model].item(), savepath/'ts_tm_{}.pkl'.format(model))
print('{} saved to {}.'.format(model, savepath))
return ds, mda
def load_mda(path=work_yuval):
import joblib
from aux_gps import path_glob
import xarray as xr
files = path_glob(path, 'ts_tm_*.pkl')
names = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-1] for x in files]
dsl = [joblib.load(x) for x in files]
dsl = [xr.DataArray(x) for x in dsl]
mda = xr.concat(dsl, 'name')
mda['name'] = names
mda.attrs['time_dim'] = 'time'
mda.attrs['LR_whole_stderr_slope'] = 0.006420637318868484
return mda
#def align_physical_bet_dagan_soundings_pw_to_gps_station_zwd(
# phys_sound_file, ims_path=ims_path, gps_station='tela',
# savepath=work_yuval, model=None):
# """compare the IPW of the physical soundings of bet dagan station to
# the any gps station - using IMS temperature of that gps station"""
# from aux_gps import get_unique_index
# from aux_gps import keep_iqr
# from aux_gps import dim_intersection
# import xarray as xr
# import numpy as np
# filename = '{}_zwd_aligned_with_physical_bet_dagan.nc'.format(gps_station)
# if not (savepath / filename).is_file():
# print('saving {} to {}'.format(filename, savepath))
# # first load physical bet_dagan Tpw, Ts, Tm and dt_range:
# phys = xr.open_dataset(phys_sound_file)
# # clean and merge:
# p_list = [get_unique_index(phys[x], 'sound_time')
# for x in ['Ts', 'Tm', 'Tpw', 'dt_range']]
# phys_ds = xr.merge(p_list)
# phys_ds = keep_iqr(phys_ds, 'sound_time', k=2.0)
# phys_ds = phys_ds.rename({'Ts': 'ts', 'Tm': 'tm'})
# # load the zenith wet daley for GPS (e.g.,TELA) station:
# zwd = load_gipsyx_results(station=gps_station, plot_fields=None)
# # zwd = xr.open_dataset(zwd_file)
# zwd = zwd[['WetZ', 'WetZ_error']]
# # loop over dt_range and average the results on PW:
# wz_list = []
# wz_std = []
# wz_error_list = []
# for i in range(len(phys_ds['dt_range'].sound_time)):
# min_time = phys_ds['dt_range'].isel(sound_time=i).sel(bnd='Min').values
# max_time = phys_ds['dt_range'].isel(sound_time=i).sel(bnd='Max').values
# wetz = zwd['WetZ'].sel(time=slice(min_time, max_time)).mean('time')
# wetz_std = zwd['WetZ'].sel(time=slice(min_time, max_time)).std('time')
# wetz_error = zwd['WetZ_error'].sel(time=slice(min_time, max_time)).mean('time')
# wz_std.append(wetz_std)
# wz_list.append(wetz)
# wz_error_list.append(wetz_error)
# wetz_gps = xr.DataArray(wz_list, dims='sound_time')
# wetz_gps.name = '{}_WetZ'.format(gps_station)
# wetz_gps_error = xr.DataArray(wz_error_list, dims='sound_time')
# wetz_gps_error.name = '{}_WetZ_error'.format(gps_station)
# wetz_gps_std = xr.DataArray(wz_list, dims='sound_time')
# wetz_gps_std.name = '{}_WetZ_std'.format(gps_station)
# wetz_gps['sound_time'] = phys_ds['sound_time']
# wetz_gps_error['sound_time'] = phys_ds['sound_time']
# new_time = dim_intersection([wetz_gps, phys_ds['Tpw']], 'sound_time')
# wetz_gps = wetz_gps.sel(sound_time=new_time)
# tpw_bet_dagan = phys_ds.Tpw.sel(sound_time=new_time)
# zwd_and_tpw = xr.merge([wetz_gps, wetz_gps_error, wetz_gps_std,
# tpw_bet_dagan])
# zwd_and_tpw = zwd_and_tpw.rename({'sound_time': 'time'})
# comp = dict(zlib=True, complevel=9) # best compression
# encoding = {var: comp for var in zwd_and_tpw.data_vars}
# zwd_and_tpw.to_netcdf(savepath / filename, 'w', encoding=encoding)
# print('Done!')
# return
# else:
# print('found file!')
# zwd_and_tpw = xr.open_dataset(savepath / filename)
# wetz = zwd_and_tpw['{}_WetZ'.format(gps_station)]
# wetz_error = zwd_and_tpw['{}_WetZ_error'.format(gps_station)]
# # load the 10 mins temperature data from IMS:
# td = xr.open_dataset(ims_path/'GNSS_5mins_TD_ALL_1996_2019.nc')
# td = td[gps_station]
# td.name = 'Ts'
# # tela_T = tela_T.resample(time='5min').ffill()
# # compute the kappa function and multiply by ZWD to get PW(+error):
# k, dk = kappa_ml(td, model=model, verbose=True)
# kappa = k.to_dataset(name='{}_kappa'.format(gps_station))
# kappa['{}_kappa_error'.format(gps_station)] = dk
# PW = (
# kappa['{}_kappa'.format(gps_station)] *
# wetz).to_dataset(
# name='{}_PW'.format(gps_station)).squeeze(
# drop=True)
# PW['{}_PW_error'.format(gps_station)] = np.sqrt(
# wetz_error**2.0 +
# kappa['{}_kappa_error'.format(gps_station)]**2.0)
# PW['TPW_bet_dagan'] = zwd_and_tpw['Tpw']
# PW = PW.dropna('time')
# return PW
def read_log_files(path, savepath=None, fltr='updated_by_shlomi',
suff='*.log'):
"""read gnss log files for putting them into ocean tides model"""
import pandas as pd
from aux_gps import path_glob
from tabulate import tabulate
def to_fwf(df, fname, showindex=False):
from tabulate import simple_separated_format
tsv = simple_separated_format(" ")
# tsv = 'plain'
content = tabulate(
df.values.tolist(), list(
df.columns), tablefmt=tsv, showindex=showindex, floatfmt='f')
open(fname, "w").write(content)
files = sorted(path_glob(path, glob_str=suff))
record = {}
for file in files:
filename = file.as_posix().split('/')[-1]
if fltr not in filename:
continue
station = filename.split('_')[0]
print('reading station {} log file'.format(station))
with open(file) as f:
content = f.readlines()
content = [x.strip() for x in content]
posnames = ['X', 'Y', 'Z']
pos_list = []
for pos in posnames:
text = [
x for x in content if '{} coordinate (m)'.format(pos) in x][0]
xyz = float(text.split(':')[-1])
pos_list.append(xyz)
text = [x for x in content if 'Site Name' in x][0]
name = text.split(':')[-1]
st_id = [x for x in content if 'Four Character ID' in x][0]
st_id = st_id.split(':')[-1]
pos_list.append(name)
record[st_id] = pos_list
df = pd.DataFrame.from_dict(record, orient='index')
posnames.append('name')
df.columns = posnames
if savepath is not None:
savefilename = 'stations_approx_loc.txt'
show_index = [x + ' ' for x in df.index.tolist()]
to_fwf(df, savepath / savefilename, show_index)
# df.to_csv(savepath / savefilename, sep=' ')
print('{} was saved to {}.'.format(savefilename, savepath))
return df
def analyze_missing_rinex_files(path, savepath=None):
from aux_gps import get_timedate_and_station_code_from_rinex
from aux_gps import datetime_to_rinex_filename
from aux_gps import path_glob
import pandas as pd
dt_list = []
files = path_glob(path, '*.Z')
for file in files:
filename = file.as_posix().split('/')[-1][:-2]
dt, station = get_timedate_and_station_code_from_rinex(filename)
dt_list.append(dt)
dt_list = sorted(dt_list)
true = pd.date_range(dt_list[0], dt_list[-1], freq='1D')
# df = pd.DataFrame(dt_list, columns=['downloaded'], index=true)
dif = true.difference(dt_list)
dts = [datetime_to_rinex_filename(station, x) for x in dif]
df_missing = pd.DataFrame(data=dts, index=dif.strftime('%Y-%m-%d'),
columns=['filenames'])
df_missing.index.name = 'dates'
if savepath is not None:
filename = station + '_missing_rinex_files.txt'
df_missing.to_csv(savepath / filename)
print('{} was saved to {}'.format(filename, savepath))
return df_missing
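# A minimal, self-contained sketch (synthetic file dates, hypothetical helper
# name) of the date-range "difference" trick used in analyze_missing_rinex_files
# above to list the days with no downloaded RINEX file:
def _example_find_missing_days():
    import pandas as pd
    downloaded = pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-04',
                                 '2019-01-07'])
    expected = pd.date_range(downloaded.min(), downloaded.max(), freq='1D')
    missing = expected.difference(downloaded)
    # -> ['2019-01-03', '2019-01-05', '2019-01-06']
    return missing.strftime('%Y-%m-%d').tolist()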
def proc_1minute(path):
stations = pd.read_csv(path + 'Zstations', header=0,
delim_whitespace=True)
station_names = stations['NAME'].values.tolist()
df_list = []
for st_name in station_names:
print('Proccessing ' + st_name + ' Station...')
df = pd.read_csv(PW_stations_path + st_name, delim_whitespace=True)
df.columns = ['date', 'time', 'PW']
df.index = pd.to_datetime(df['date'] + 'T' + df['time'])
df.drop(columns=['date', 'time'], inplace=True)
df_list.append(df)
df = pd.concat(df_list, axis=1)
print('Concatenating to Xarray...')
# ds = xr.concat([df.to_xarray() for df in df_list], dim="station")
# ds['station'] = station_names
df.columns = station_names
ds = df.to_xarray()
ds = ds.rename({'index': 'time'})
# da = ds.to_array(name='PW').squeeze(drop=True)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
print('Saving to PW_2007-2016.nc')
ds.to_netcdf(work_path + 'PW_2007-2016.nc', 'w', encoding=encoding)
print('Done!')
# clean the data:
# da = da.where(da >= 0, np.nan)
# da = da.where(da < 100, np.nan)
# plot the data:
ds.to_array(dim='station').plot(x='time', col='station', col_wrap=4)
# hist:
# df=ds.to_dataframe()
sl = (df > 0) & (df < 50)
df[sl].hist(bins=30, grid=False, figsize=(15, 8))
return
def parameter_study_ts_tm_TELA_bet_dagan(tel_aviv_IMS_file, path=work_yuval,
coef=[-3, 3], inter=[-300, 300],
span=10, breakdown=True, plot=True):
import xarray as xr
import numpy as np
from aux_gps import dim_intersection
import matplotlib.pyplot as plt
filename = 'TELA_zwd_aligned_with_physical_bet_dagan.nc'
zwd_and_tpw = xr.open_dataset(path / filename)
wetz = zwd_and_tpw['TELA_WetZ']
tpw = zwd_and_tpw['Tpw']
# load the 10 mins temperature data from IMS:
tela_T = xr.open_dataset(tel_aviv_IMS_file)
coef_space = np.linspace(*coef, span)
intercept_space = np.linspace(*inter, span)
model = np.stack([coef_space, intercept_space], axis=0)
if breakdown:
seasons = ['DJF', 'MAM', 'JJA', 'SON']
hours = [0, 12]
rds_list = []
for season in seasons:
for hour in hours:
print('calculating kappa of season {} and hour {}'.format(season, hour))
T = tela_T.to_array(name='TELA_T').squeeze(drop=True)
T = T.where(T['time.season'] == season).dropna('time')
T = T.where(T['time.hour'] == hour).dropna('time')
k, _ = kappa_ml(T, model=model, no_error=True)
print('building results...')
pw = k * wetz
new_time = dim_intersection([pw, tpw])
pw = pw.sel(time=new_time)
tpw_sel = tpw.sel(time=new_time)
rmse = (tpw_sel - pw)**2.0
rmse = np.sqrt(rmse.mean('time'))
mean_error = (tpw_sel - pw).mean('time')
rmse.name = 'RMSE'
mean_error.name = 'MEAN'
merged = xr.merge([mean_error, rmse])
merged = merged.expand_dims(['season', 'hour'])
merged['season'] = [season]
merged['hour'] = [hour]
rds_list.append(merged.stack(prop=['season', 'hour']))
rds = xr.concat(rds_list, 'prop').unstack('prop')
print('Done!')
else:
print('calculating kappa for all data!')
T = tela_T.to_array(name='TELA_T').squeeze(drop=True)
k, _ = kappa_ml(T, model=model, no_error=True)
print('building results...')
pw = k * wetz
new_time = dim_intersection([pw, tpw])
pw = pw.sel(time=new_time)
tpw_sel = tpw.sel(time=new_time)
rmse = (tpw_sel - pw)**2.0
rmse = np.sqrt(rmse.mean('time'))
mean_error = (tpw_sel - pw).mean('time')
rmse.name = 'RMSE_all'
mean_error.name = 'MEAN_all'
rds = xr.merge([mean_error, rmse])
print('Done!')
if plot:
if not breakdown:
fig, ax = plt.subplots(2, 1, figsize=(12, 8), sharex=True)
rds.MEAN_all.plot.pcolormesh(ax=ax[0])
rds.RMSE_all.plot.pcolormesh(ax=ax[1])
else:
fg_mean = rds.MEAN.plot.pcolormesh(row='hour', col='season',
figsize=(20, 10),
cmap='seismic')
[ax.grid() for ax in fg_mean.fig.axes]
# fg_mean.fig.tight_layout()
# fg_mean.fig.subplots_adjust(right=0.9)
fg_rmse = rds.RMSE.plot.pcolormesh(row='hour', col='season',
figsize=(20, 10))
[ax.grid() for ax in fg_rmse.fig.axes]
# fg_mean.fig.tight_layout()
# fg_rmse.fig.subplots_adjust(right=0.9)
return rds
#def get_geo_data_from_gps_stations(gps_names):
# import requests
# from bs4 import BeautifulSoup as bs
# user = "anonymous"
# passwd = "<PASSWORD>"
# # Make a request to the endpoint using the correct auth values
# auth_values = (user, passwd)
# response = requests.get(url, auth=auth_values)
# soup = bs(response.text, "lxml")
# allLines = soup.text.split('\n')
# X = [x for x in allLines if 'XLR coordinate' in x][0].split()[-1]
# Y = [x for x in allLines if 'Y coordinate' in x][0].split()[-1]
# Z = [x for x in allLines if 'Z coordinate' in x][0].split()[-1]
#
## Convert JSON to dict and print
#print(response.json())
def read_stations_to_dataset(path, group_name='israeli', save=False,
names=None):
import xarray as xr
if names is None:
stations = []
for filename in sorted(path.glob('garner_trop_[!all_stations]*.nc')):
st_name = filename.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
print('Reading station {}'.format(st_name))
da = xr.open_dataarray(filename)
da = da.dropna('time')
stations.append(da)
ds = xr.merge(stations)
if save:
savefile = 'garner_' + group_name + '_stations.nc'
print('saving {} to {}'.format(savefile, path))
ds.to_netcdf(path / savefile, 'w')
print('Done!')
return ds
def filter_stations(path, group_name='israeli', save=False):
"""filter bad values in trop products stations"""
import xarray as xr
from aux_gps import Zscore_xr
filename = 'garner_' + group_name + '_stations.nc'
print('Reading {} from {}'.format(filename, path))
ds = xr.open_dataset(path / filename)
ds['zwd'].attrs['units'] = 'Zenith Wet Delay in cm'
stations = [x for x in ds.data_vars.keys()]
for station in stations:
print('filtering station {}'.format(station))
# first , remove negative values:
ds[station] = ds[station].where(ds[station].sel(zwd='value') > 0)
# get zscore of data and errors:
zscore_val = Zscore_xr(ds[station].sel(zwd='value'), dim='time')
zscore_sig = Zscore_xr(ds[station].sel(zwd='sigma'), dim='time')
# filter for zscore <5 for data and <3 for error:
ds[station] = ds[station].where(np.abs(zscore_val) < 5)
ds[station] = ds[station].where(np.abs(zscore_sig) < 3)
if save:
filename = filename.replace('.nc', '_filtered.nc')
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
ds.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done!')
return ds
# def overlap_time_xr(*args, union=False):
# """return the intersection of datetime objects from time field in *args"""
# # caution: for each arg input is xarray with dim:time
# time_list = []
# for ts in args:
# time_list.append(ts.time.values)
# if union:
# union = set.union(*map(set, time_list))
# un = sorted(list(union))
# return un
# else:
# intersection = set.intersection(*map(set, time_list))
# intr = sorted(list(intersection))
# return intr
def produce_pw_statistics(path=work_yuval, resample_to_mm=True, thresh=50,
pw_input=None):
import xarray as xr
from scipy.stats import kurtosis
from scipy.stats import skew
import pandas as pd
if pw_input is None:
pw = xr.load_dataset(path / 'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
else:
pw = pw_input
if resample_to_mm:
pw = pw.resample(time='MS').mean()
pd.options.display.float_format = '{:.1f}'.format
mean = pw.mean('time').reset_coords().to_array(
'index').to_dataframe('Mean')
std = pw.std('time').reset_coords().to_array('index').to_dataframe('SD')
median = pw.median('time').reset_coords().to_array(
'index').to_dataframe('Median')
q5 = pw.quantile(0.05, 'time').reset_coords(drop=True).to_array(
'index').to_dataframe('5th')
q95 = pw.quantile(0.95, 'time').reset_coords(drop=True).to_array(
'index').to_dataframe('95th')
maximum = pw.max('time').reset_coords().to_array(
'index').to_dataframe('Maximum')
minimum = pw.min('time').reset_coords().to_array(
'index').to_dataframe('Minimum')
sk = pw.map(skew, nan_policy='omit').to_array(
'index').to_dataframe('Skewness')
kurt = pw.map(kurtosis, nan_policy='omit').to_array(
'index').to_dataframe('Kurtosis')
df = pd.concat([mean, std, median, q5, q95,
maximum, minimum, sk, kurt], axis=1)
cols = []
cols.append('Site ID')
cols += [x for x in df.columns]
df['Site ID'] = df.index.str.upper()
df = df[cols]
df.index.name = ''
return df
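# A minimal, synthetic-data sketch of the table-building pattern used in
# produce_pw_statistics above: each per-station statistic is turned into a
# one-column DataFrame and the columns are concatenated side by side.
# Station names and values below are made up for illustration only.
def _example_pwv_stats_table():
    import numpy as np
    import pandas as pd
    import xarray as xr
    rng = np.random.default_rng(1)
    time = pd.date_range('2019-01-01', periods=100, freq='D')
    pw = xr.Dataset({'tela': ('time', 20 + 5 * rng.standard_normal(100)),
                     'jslm': ('time', 15 + 4 * rng.standard_normal(100))},
                    coords={'time': time})
    mean = pw.mean('time').to_array('index').to_dataframe('Mean')
    sd = pw.std('time').to_array('index').to_dataframe('SD')
    q95 = pw.quantile(0.95, 'time').reset_coords(drop=True).to_array(
        'index').to_dataframe('95th')
    return pd.concat([mean, sd, q95], axis=1)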
def produce_geo_gnss_solved_stations(path=gis_path,
file='israeli_gnss_coords.txt',
add_distance_to_coast=False,
climate_path=None,
plot=True):
import geopandas as gpd
import pandas as pd
from pathlib import Path
from ims_procedures import get_israeli_coast_line
cwd = Path().cwd()
df = pd.read_csv(cwd / file, delim_whitespace=True)
df = df[['lat', 'lon', 'alt', 'name']]
isr = gpd.read_file(path / 'Israel_and_Yosh.shp')
isr.crs = {'init': 'epsg:4326'}
stations = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=isr.crs)
if add_distance_to_coast:
isr_coast = get_israeli_coast_line(path=path)
coast_lines = [isr_coast.to_crs(
'epsg:2039').loc[x].geometry for x in isr_coast.index]
for station in stations.index:
point = stations.to_crs('epsg:2039').loc[station, 'geometry']
stations.loc[station, 'distance'] = min(
[x.distance(point) for x in coast_lines]) / 1000.0
# define groups for longterm analysis, north to south, west to east:
coastal_dict = {
key: 0 for (key) in [
'kabr',
'bshm',
'csar',
'tela',
'alon',
'slom',
'nizn']}
highland_dict = {key: 1 for (key) in
['nzrt', 'mrav', 'yosh', 'jslm', 'klhv', 'yrcm', 'ramo']}
eastern_dict = {key: 2 for (key) in
['elro', 'katz', 'drag', 'dsea', 'spir', 'nrif', 'elat']}
groups_dict = {**coastal_dict, **highland_dict, **eastern_dict}
stations['groups_annual'] = pd.Series(groups_dict)
# define groups with climate code
gr1_dict = {
key: 0 for (key) in [
'kabr',
'bshm',
'csar',
'tela',
'alon',
'nzrt',
'mrav',
'yosh',
'jslm',
'elro',
'katz']}
gr2_dict = {key: 1 for (key) in
['slom', 'klhv', 'yrcm', 'drag']}
gr3_dict = {key: 2 for (key) in
['nizn', 'ramo', 'dsea', 'spir', 'nrif', 'elat']}
groups_dict = {**gr1_dict, **gr2_dict, **gr3_dict}
stations['groups_climate'] = pd.Series(groups_dict)
if climate_path is not None:
cc = pd.read_csv(climate_path / 'gnss_station_climate_code.csv',
index_col='station')
stations = stations.join(cc)
# cc, ccc = assign_climate_classification_to_gnss(path=climate_path)
# stations['climate_class'] = cc
# stations['climate_code'] = ccc
if plot:
ax = isr.plot()
stations.plot(ax=ax, column='alt', cmap='Greens',
edgecolor='black', legend=True)
for x, y, label in zip(stations.lon, stations.lat,
stations.index):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
return stations
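# A minimal sketch of the distance-to-coast idea used above, with plain shapely
# geometries that are assumed to already be in a metric CRS (e.g. EPSG:2039);
# the coordinates below are hypothetical and no shapefiles are read here.
def _example_distance_to_coast_km():
    from shapely.geometry import LineString, Point
    coast = LineString([(170000, 600000), (180000, 700000)])  # "coastline" in meters
    station = Point(210000, 650000)                           # "station" in meters
    return station.distance(coast) / 1000.0                   # distance in km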
def add_UERRA_xy_to_israeli_gps_coords(path=work_yuval, era5_path=era5_path):
import xarray as xr
from aux_gps import path_glob
from aux_gps import get_nearest_lat_lon_for_xy
import pandas as pd
from aux_gps import calculate_distance_between_two_lat_lon_points
file = path_glob(era5_path, 'UERRA*.nc')[0]
uerra = xr.open_dataset(file)
ulat = uerra['latitude']
ulon = uerra['longitude']
df = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
points = df[['lat', 'lon']].values
xy = get_nearest_lat_lon_for_xy(ulat, ulon, points)
udf = pd.DataFrame(xy, index=df.index, columns=['y', 'x'])
udf['lat'] = [ulat.isel(y=xi, x=yi).item() for (xi, yi) in xy]
udf['lon'] = [ulon.isel(y=xi, x=yi).item() for (xi, yi) in xy]
ddf = calculate_distance_between_two_lat_lon_points(
df['lat'],
df['lon'],
udf['lat'],
udf['lon'],
orig_epsg='4326',
meter_epsg='2039')
ddf /= 1000 # distance in km
udf['distance_to_orig'] = ddf
return udf
def produce_geo_gps_stations(path=gis_path, file='All_gps_stations.txt',
plot=True):
import geopandas as gpd
import xarray as xr
from pathlib import Path
from aux_gps import get_latlonalt_error_from_geocent_error
stations_df = pd.read_csv(file, index_col='name',
delim_whitespace=True)
isr_dem = xr.open_rasterio(path / 'israel_dem.tif')
alt_list = []
for index, row in stations_df.iterrows():
lat = row['lat']
lon = row['lon']
alt = isr_dem.sel(band=1, x=lon, y=lat, method='nearest').values.item()
alt_list.append(float(alt))
stations_df['alt_dem'] = alt_list
isr = gpd.read_file(path / 'israel_demog2012.shp')
isr.crs = {'init': 'epsg:4326'}
stations = gpd.GeoDataFrame(stations_df,
geometry=gpd.points_from_xy(stations_df.lon,
stations_df.lat),
crs=isr.crs)
stations_isr = gpd.sjoin(stations, isr, op='within')
stations_approx = pd.read_csv(Path().cwd()/'stations_approx_loc.txt',
delim_whitespace=True)
lon, lat, alt = get_latlonalt_error_from_geocent_error(
stations_approx['X'].values, stations_approx['Y'].values,
stations_approx['Z'].values)
stations_approx.columns = ['approx_X', 'approx_Y', 'approx_Z']
stations_approx['approx_lat'] = lat
stations_approx['approx_lon'] = lon
stations_approx['approx_alt'] = alt
stations_isr_df = pd.DataFrame(stations_isr.drop(columns=['geometry',
'index_right']))
compare_df = stations_isr_df.join(stations_approx)
alt_list = []
for index, row in compare_df.iterrows():
lat = row['approx_lat']
lon = row['approx_lon']
alt = isr_dem.sel(band=1, x=lon, y=lat, method='nearest').values.item()
alt_list.append(float(alt))
compare_df['approx_alt_dem'] = alt_list
if plot:
ax = isr.plot()
stations_isr.plot(ax=ax, column='alt', cmap='Greens',
edgecolor='black', legend=True)
for x, y, label in zip(stations_isr.lon, stations_isr.lat,
stations_isr.index):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
return stations_isr
def get_minimum_distance(geo_ims, geo_gps, path, plot=True):
def min_dist(point, gpd2):
gpd2['Dist'] = gpd2.apply(
lambda row: point.distance(
row.geometry), axis=1)
geoseries = gpd2.iloc[gpd2['Dist'].values.argmin()]
geoseries.loc['distance'] = gpd2['Dist'].values.min()
return geoseries
min_list = []
for gps_rows in geo_gps.iterrows():
ims_min_series = min_dist(gps_rows[1]['geometry'], geo_ims)
min_list.append(ims_min_series[['ID', 'name_hebrew', 'name_english',
'lon', 'lat', 'alt', 'starting_date',
'distance']])
geo_df = pd.concat(min_list, axis=1).T
geo_df['lat'] = geo_df['lat'].astype(float)
geo_df['lon'] = geo_df['lon'].astype(float)
geo_df['alt'] = geo_df['alt'].astype(float)
geo_df.index = geo_gps.index
stations_meta = ims_api_get_meta()
# select ims_stations that appear in the geo_df (closest to gps stations):
ims_selected = stations_meta.loc[stations_meta.stationId.isin(
geo_df.ID.values.tolist())]
# get the channel of temperature measurement of the selected stations:
cid = []
for index, row in geo_df.iterrows():
channel = [irow['TD_channel'] for ind, irow in ims_selected.iterrows()
if irow['stationId'] == row['ID']]
if channel:
cid.append(channel[0])
else:
cid.append(None)
# put the channel_id in the geo_df so that later we can download the exact
# channel for each IMS station needed for the gps station:
geo_df['channel_id'] = cid
geo_df['channel_id'] = geo_df['channel_id'].fillna(0).astype(int)
geo_df['ID'] = geo_df.ID.astype(int)
geo_df['distance'] = geo_df.distance.astype(float)
geo_df['starting_date'] = pd.to_datetime(geo_df.starting_date)
geo_df['gps_lat'] = geo_gps.lat
geo_df['gps_lon'] = geo_gps.lon
geo_df['gps_alt'] = geo_gps.alt
geo_df['alt_diff'] = geo_df.alt - geo_gps.alt
if plot:
import geopandas as gpd
isr = gpd.read_file(path / 'israel_demog2012.shp')
isr.crs = {'init': 'epsg:4326'}
geo_gps_new = gpd.GeoDataFrame(geo_df,
geometry=gpd.points_from_xy(geo_df.lon,
geo_df.lat),
crs=isr.crs)
ax = isr.plot()
geo_gps.plot(ax=ax, color='green',
edgecolor='black', legend=True)
for x, y, label in zip(geo_gps.lon, geo_gps.lat,
geo_gps.alt):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
geo_gps_new.plot(ax=ax, color='red', edgecolor='black', legend=True)
for x, y, label in zip(geo_gps_new.lon, geo_gps_new.lat,
geo_gps_new.alt):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
return geo_df
def fix_T_height(path, geo_df, lapse_rate=6.5):
"""fix the temperature diffrence due to different height between the IMS
and GPS stations"""
# use lapse rate of 6.5 K/km = 6.5e-3 K/m
import xarray as xr
lr = 1e-3 * lapse_rate # convert to K/m
Tds = xr.open_dataset(path / 'IMS_TD_israeli_for_gps.nc')
stations = [x for x in Tds.data_vars.keys() if 'missing' not in x]
ds_list = []
for st in stations:
try:
alt_diff = geo_df.loc[st, 'alt_diff']
# correction is lapse_rate in K/m times alt_diff in meters
# if alt_diff is positive, T should be higher and vice versa
Tds[st].attrs['description'] += ' The data was fixed using {} K/km '\
'lapse rate because the difference'\
' between the temperature station '\
'and the gps station is {}'\
.format(lapse_rate, alt_diff)
Tds[st].attrs['lapse_rate_fix'] = lapse_rate
ds_list.append(Tds[st] + lr * alt_diff)
except KeyError:
print('{} station not found in gps data'.format(st))
continue
ds = xr.merge(ds_list)
# copy attrs:
for da in ds:
ds[da].attrs = Tds[da].attrs
return ds
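# A standalone numerical sketch of the lapse-rate correction applied in
# fix_T_height above: with a 6.5 K/km lapse rate and alt_diff = alt_IMS -
# alt_GPS = +200 m, the IMS temperature is increased by 6.5e-3 * 200 = 1.3 K.
# The numbers below are illustrative only.
def _example_lapse_rate_correction():
    lapse_rate = 6.5   # K/km
    alt_diff = 200.0   # m, IMS station altitude minus GPS station altitude
    t_ims = 20.0       # deg C measured at the (higher) IMS station
    t_gps = t_ims + 1e-3 * lapse_rate * alt_diff
    return t_gps       # 21.3 deg C estimated at the (lower) GPS station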
def produce_geo_df(gis_path=gis_path, plot=True):
import geopandas as gpd
import matplotlib.pyplot as plt
from ims_procedures import read_ims_metadata_from_files
print('getting IMS temperature stations metadata...')
ims = read_ims_metadata_from_files(path=gis_path, freq='10mins')
isr_with_yosh = gpd.read_file(gis_path / 'Israel_demog_yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
geo_ims = gpd.GeoDataFrame(ims, geometry=gpd.points_from_xy(ims.lon,
ims.lat),
crs=isr_with_yosh.crs)
print('getting GPS stations ZWD from garner...')
gps = produce_geo_gps_stations(gis_path, plot=False)
# print('combining temperature and GPS stations into one dataframe...')
# geo_df = get_minimum_distance(ims, gps, gis_path, plot=False)
print('Done!')
if plot:
ax = isr_with_yosh.plot(figsize=(10, 8))
geo_ims.plot(ax=ax, color='red', edgecolor='black', legend=True)
gps.plot(ax=ax, color='green', edgecolor='black', legend=True)
plt.legend(['IMS_stations', 'GNSS stations'])
# for x, y, label in zip(gps.lon, gps.lat,
# gps.index):
# ax.annotate(label, xy=(x, y), xytext=(3, 3),
# textcoords="offset points")
plt.tight_layout()
return ims, gps
def save_GNSS_PWV_hydro_stations(path=work_yuval, stacked=False, sd=False):
import xarray as xr
from aux_gps import save_ncfile
from aux_gps import time_series_stack
if not stacked:
file = path / 'ZWD_thresh_0_for_hydro_analysis.nc'
zwd = xr.load_dataset(file)
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm()
ds = save_GNSS_PW_israeli_stations(model_name='TSEN',
thresh=0,mda=mda,
extra_name='for_hydro_analysis')
else:
if stacked == 'stack':
file = path / 'GNSS_PW_thresh_0_for_hydro_analysis.nc'
pwv = xr.open_dataset(file)
pwv = pwv[[x for x in pwv if '_error' not in x]]
pwv.load()
pwv_stacked = pwv.map(time_series_stack, grp2='dayofyear', return_just_stacked_da=True)
filename = 'GNSS_PW_thresh_0_hour_dayofyear_rest.nc'
save_ncfile(pwv_stacked, path, filename)
elif stacked == 'unstack':
file = path / 'GNSS_PW_thresh_0_for_hydro_analysis.nc'
pwv = xr.open_dataset(file)
pwv = pwv[[x for x in pwv if '_error' not in x]]
pwv.load()
pwv = pwv.map(produce_PWV_anomalies_from_stacked_groups,
grp1='hour', grp2='dayofyear', plot=False, standartize=sd)
if sd:
filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc'
else:
filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc'
save_ncfile(pwv, path, filename)
return
def save_GNSS_ZWD_hydro_stations(path=work_yuval):
import xarray as xr
from aux_gps import save_ncfile
file = path / 'ZWD_unselected_israel_1996-2020.nc'
zwd = xr.load_dataset(file)
# zwd = zwd[[x for x in zwd.data_vars if '_error' not in x]]
filename = 'ZWD_thresh_0_for_hydro_analysis.nc'
save_ncfile(zwd, path, filename)
return
def save_GNSS_PW_israeli_stations(path=work_yuval, ims_path=ims_path,
savepath=work_yuval, mda=None,
model_name='TSEN', thresh=50,
extra_name=None):
import xarray as xr
from aux_gps import path_glob
if extra_name is not None:
file = path_glob(path, 'ZWD_thresh_{:.0f}_{}.nc'.format(thresh, extra_name))[0]
else:
file = path_glob(path, 'ZWD_thresh_{:.0f}.nc'.format(thresh))[0]
zwd = xr.load_dataset(file)
print('loaded {} file as ZWD.'.format(file.as_posix().split('/')[-1]))
file = sorted(path_glob(ims_path, 'GNSS_5mins_TD_ALL_*.nc'))[-1]
Ts = xr.load_dataset(file)
print('loaded {} file as Ts.'.format(file.as_posix().split('/')[-1]))
stations = [x for x in zwd.data_vars]
ds_list = []
for sta in stations:
print(sta, '5mins')
pw = produce_GNSS_station_PW(zwd[sta], Ts[sta.split('_')[0]], mda=mda,
plot=False, model_name=model_name,
model_dict=None)
ds_list.append(pw)
ds = xr.merge(ds_list)
ds.attrs.update(zwd.attrs)
if savepath is not None:
if extra_name is not None:
filename = 'GNSS_PW_thresh_{:.0f}_{}.nc'.format(thresh, extra_name)
else:
filename = 'GNSS_PW_thresh_{:.0f}.nc'.format(thresh)
print('saving {} to {}'.format(filename, savepath))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
ds.to_netcdf(savepath / filename, 'w', encoding=encoding)
# for skey in sample.keys():
# ds_list = []
# for sta in stations:
# print(sta, sample[skey])
# pw = produce_GNSS_station_PW(sta, skey, plot=False, phys=phys)
# ds_list.append(pw)
# ds = xr.merge(ds_list)
# if savepath is not None:
# filename = 'GNSS_{}_PW.nc'.format(sample[skey])
# print('saving {} to {}'.format(filename, savepath))
# comp = dict(zlib=True, complevel=9) # best compression
# encoding = {var: comp for var in ds.data_vars}
# ds.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return ds
def align_group_pw_and_T_to_long_term_monthly_means_and_save(
load_path=work_yuval,
ims_path=ims_path,
thresh=50,
grp='month',
savepath=work_yuval):
import xarray as xr
from aux_gps import weighted_long_term_monthly_means_da
pw = xr.load_dataset(load_path / 'GNSS_PW_thresh_{:.0f}.nc'.format(thresh))
pw_attrs = pw.attrs
attrs = {da: val.attrs for (da, val) in pw.data_vars.items()}
# use upper() on names:
da = pw.to_array('station')
da['station'] = da['station'].str.upper()
pw = da.to_dataset('station')
for da in pw.data_vars.values():
da.attrs = attrs.get(da.name.lower())
T = xr.load_dataset(ims_path / 'GNSS_5mins_TD_ALL_1996_2019.nc')
# align T and pw:
for da in pw.data_vars:
pw['{}_T'.format(da)] = T[da.lower()]
# pw_grp = pw.map(, plot=False)
pw_grp = pw.groupby('time.{}'.format(grp)).mean('time')
pw_grp.attrs = pw_attrs
# now do climatology also:
pw_clim = pw.map(weighted_long_term_monthly_means_da, plot=False)
# for sta in pw_clim.data_vars.keys():
# pw_clim = pw_clim.rename({sta: sta + '_clim'})
pw_clim.attrs = pw_attrs
just_pw = [x for x in pw_clim if '_T' not in x]
for da in just_pw:
pw_clim[da].attrs = attrs.get(da.lower())
if savepath is not None:
filename = 'PW_T_{}ly_means_clim_thresh_{:.0f}.nc'.format(grp, thresh)
pw_clim.to_netcdf(savepath / filename, 'w')
print('saved {} to {}.'.format(filename, savepath))
return pw_clim
def group_anoms_and_cluster(load_path=work_yuval, remove_grp='month',
thresh=50, grp='hour', with_weights=True,
season=None, n_clusters=4, pw_input=None):
import xarray as xr
from sklearn.cluster import KMeans
# load data and save attrs in dict:
# pw = xr.load_dataset(work_yuval/'GNSS_PW_anom_{:.0f}_hour_dayofyear.nc'.format(thresh))
pw = xr.load_dataset(load_path / 'GNSS_PW_thresh_{:.0f}.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = {da: val.attrs for (da, val) in pw.data_vars.items()}
# use upper() on names:
da = pw.to_array('station')
da['station'] = da['station'].str.upper()
pw = da.to_dataset('station')
for da in pw.data_vars.values():
da.attrs = attrs.get(da.name.lower())
# extract weights from attrs:
weights = [float(x.attrs['mean_years']) for x in pw.data_vars.values()]
weights = np.array(weights) / np.max(np.array(weights))
# select season:
if season is not None and grp == 'hour':
pw = pw.sel(time=pw['time.season'] == season)
# groupby and create means:
if remove_grp is not None:
print('removing long term {}ly means first'.format(remove_grp))
pw = pw.groupby('time.{}'.format(remove_grp)) - pw.groupby('time.{}'.format(remove_grp)).mean('time')
pw_anom = pw.groupby('time.{}'.format(grp)).mean('time')
pw_anom = pw_anom.reset_coords(drop=True)
# pw_anom = pw.groupby('time.{}'.format('month')).mean('time')
if pw_input is not None:
pw_anom = pw_input
# to dataframe:
df = pw_anom.to_dataframe()
weights = pd.Series(weights, index=[x for x in pw.data_vars])
if n_clusters is not None:
# cluster the anomalies:
if with_weights:
clr = KMeans(n_clusters=n_clusters, random_state=0).fit(df.T, sample_weight=weights)
else:
clr = KMeans(n_clusters=n_clusters, random_state=0).fit(df.T)
# get the labels start with 1:
clr.labels_ += 1
# clustering = DBSCAN(eps=3, min_samples=2).fit(df)
# clustering = OPTICS(min_samples=2).fit(df)
labels = dict(zip(df.columns, clr.labels_))
labels_sorted = {
k: v for k,
v in sorted(
labels.items(),
key=lambda item: item[1])}
order = [x for x in labels_sorted.keys()]
df = df[order]
return df, labels_sorted, weights
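# A minimal, synthetic-data sketch of the clustering idea in
# group_anoms_and_cluster above: columns are stations, rows are the 24 hourly
# PWV anomalies, and KMeans groups stations with similar diurnal cycles.
# Station names and amplitudes below are made up for illustration only.
def _example_cluster_diurnal_profiles():
    import numpy as np
    import pandas as pd
    from sklearn.cluster import KMeans
    rng = np.random.default_rng(0)
    hours = np.arange(24)
    coastal = 1.5 * np.sin(2 * np.pi * (hours - 14) / 24)
    inland = 0.5 * np.sin(2 * np.pi * (hours - 8) / 24)
    df = pd.DataFrame({'STA1': coastal + 0.1 * rng.standard_normal(24),
                       'STA2': coastal + 0.1 * rng.standard_normal(24),
                       'STA3': inland + 0.1 * rng.standard_normal(24),
                       'STA4': inland + 0.1 * rng.standard_normal(24)},
                      index=hours)
    clr = KMeans(n_clusters=2, random_state=0).fit(df.T)
    return dict(zip(df.columns, clr.labels_ + 1))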
def produce_GNSS_station_PW(zwd_thresh, Ts, mda=None,
plot=True, model_name='TSEN', model_dict=None):
import numpy as np
from aux_gps import xr_reindex_with_date_range
"""model=None is LR, model='bevis'
is Bevis 1992-1994 et al."""
zwd_name = zwd_thresh.name
Ts_name = Ts.name
assert Ts_name in zwd_name
# use of damped Ts: ?
# Ts_daily = Ts.resample(time='1D').mean()
# upsampled_daily = Ts_daily.resample(time='1D').ffill()
# damped = Ts*0.25 + 0.75*upsampled_daily
if mda is None and model_dict is not None:
k, dk = kappa_ml(Ts, model=model_dict)
elif mda is not None:
k, dk = produce_kappa_ml_with_cats(Ts, mda=mda, model_name=model_name)
else:
raise KeyError('need model or model_dict argument for PW!')
PW = zwd_thresh.copy(deep=True)
if '_error' in zwd_name:
PW = np.sqrt(zwd_thresh**2.0 + dk**2.0)
PW.name = zwd_name
PW.attrs.update(zwd_thresh.attrs)
PW.attrs['units'] = 'mm'
PW.attrs['long_name'] = 'Precipitable water error'
else:
PW = k * zwd_thresh
PW.name = zwd_name
PW.attrs.update(zwd_thresh.attrs)
PW.attrs['units'] = 'mm'
PW.attrs['long_name'] = 'Precipitable water'
PW = PW.sortby('time')
PW = xr_reindex_with_date_range(PW, freq='5T')
if plot:
PW.plot()
return PW
def produce_kappa_ml_with_cats(Tds, mda=None, model_name='LR'):
"""produce kappa_ml with different categories such as hour, season"""
import xarray as xr
if mda is None:
# Bevis 1992 relationship:
print('Using Bevis 1992-1994 Ts-Tm relationship.')
kappa_ds, kappa_err = kappa_ml(Tds, model=None)
return kappa_ds, kappa_err
time_dim = mda.attrs['time_dim']
hours = None
seasons = None
if 'season' in [x.split('.')[-1] for x in list(mda.dims)]:
val = mda['{}.season'.format(time_dim)].values.tolist()
key = '{}.season'.format(time_dim)
seasons = {key: val}
if 'hour' in [x.split('.')[-1] for x in list(mda.dims)]:
val = mda['{}.hour'.format(time_dim)].values.tolist()
key = '{}.hour'.format(time_dim)
hours = {key: val}
if len(mda.dims) == 1 and 'name' in mda.dims:
print('Found whole data Ts-Tm relationship.')
# Tmul = mda.sel(parameter='slope').values.item()
# Toff = mda.sel(parameter='intercept').values.item()
m = mda.sel(name=model_name).values.item()
kappa_ds, kappa_err = kappa_ml(
Tds, model=m, slope_err=mda.attrs['LR_whole_stderr_slope'])
return kappa_ds, kappa_err
elif len(mda.dims) == 2 and hours is not None:
print('Found hourly Ts-Tm relationship slice.')
kappa_list = []
kappa_err_list = []
h_key = [x for x in hours.keys()][0]
for hr_num in [x for x in hours.values()][0]:
print('working on hour {}'.format(hr_num))
sliced = Tds.where(Tds[h_key] == hr_num).dropna(time_dim)
m = mda.sel({'name': model_name, h_key: hr_num}).values.item()
kappa_part, kappa_err = kappa_ml(sliced, model=m)
kappa_list.append(kappa_part)
kappa_err_list.append(kappa_err)
des_attrs = 'hourly data Tm formulation using {} model'.format(
model_name)
elif len(mda.dims) == 2 and seasons is not None:
print('Found season Ts-Tm relationship slice.')
kappa_list = []
kappa_err_list = []
s_key = [x for x in seasons.keys()][0]
for season in [x for x in seasons.values()][0]:
print('working on season {}'.format(season))
sliced = Tds.where(Tds[s_key] == season).dropna(time_dim)
m = mda.sel({'name': model_name, s_key: season}).values.item()
kappa_part, kappa_err = kappa_ml(sliced, model=m)
kappa_list.append(kappa_part)
kappa_err_list.append(kappa_err)
des_attrs = 'seasonal data Tm formulation using {} model'.format(
model_name)
elif (len(mda.dims) == 3 and seasons is not None and hours is not None):
print('Found hourly and seasonal Ts-Tm relationship slice.')
kappa_list = []
kappa_err_list = []
h_key = [x for x in hours.keys()][0]
s_key = [x for x in seasons.keys()][0]
for hr_num in [x for x in hours.values()][0]:
for season in [x for x in seasons.values()][0]:
print('working on season {}, hour {}'.format(
season, hr_num))
sliced = Tds.where(Tds[s_key] == season).dropna(
time_dim).where(Tds[h_key] == hr_num).dropna(time_dim)
m = mda.sel({'name': model_name, s_key: season,
h_key: hr_num}).values.item()
kappa_part, kappa_err = kappa_ml(sliced, model=m)
kappa_list.append(kappa_part)
kappa_err_list.append(kappa_err)
des_attrs = 'hourly and seasonal data Tm formulation using {} model'.format(
model_name)
kappa_ds = xr.concat(kappa_list, time_dim)
kappa_err_ds = xr.concat(kappa_err_list, time_dim)
return kappa_ds, kappa_err_ds
def produce_single_station_IPW(zwd, Tds, mda=None, model_name='LR'):
"""input is zwd from gipsy or garner, Tds is the temperature of the
station, mda is the Ts-Tm relationsship ml models dataarray, model is
the ml model chosen."""
import xarray as xr
# hours = dict(zip([12, 0], ['noon', 'midnight']))
if isinstance(zwd, xr.Dataset):
try:
zwd_error = zwd['WetZ_error']
zwd = zwd['WetZ']
except KeyError:
raise KeyError('no error field in zwd dataset...')
if mda is None:
# Bevis 1992 relationship:
print('Using Bevis 1992-1994 Ts-Tm relationship.')
kappa_ds, kappa_err = kappa_ml(Tds, model=None)
ipw = kappa_ds * zwd
ipw_error = kappa_ds * zwd_error + zwd * kappa_err
ipw_error.name = 'PW_error'
ipw_error.attrs['long_name'] = 'Precipitable Water standard error'
ipw_error.attrs['units'] = 'mm'
ipw.name = 'PW'
ipw.attrs['long_name'] = 'Precipitable Water'
ipw.attrs['units'] = 'mm'
ipw = ipw.to_dataset(name='PW')
ipw['PW_error'] = ipw_error
ipw.attrs['description'] = 'whole data Tm formulation using Bevis etal. 1992'
print('Done!')
return ipw
time_dim = mda.attrs['time_dim']
hours = None
seasons = None
if 'season' in [x.split('.')[-1] for x in list(mda.dims)]:
val = mda['{}.season'.format(time_dim)].values.tolist()
key = '{}.season'.format(time_dim)
seasons = {key: val}
if 'hour' in [x.split('.')[-1] for x in list(mda.dims)]:
val = mda['{}.hour'.format(time_dim)].values.tolist()
key = '{}.hour'.format(time_dim)
hours = {key: val}
if 'any_cld' in mda.dims:
any_clds = mda.any_cld.values.tolist()
if len(mda.dims) == 1 and 'name' in mda.dims:
print('Found whole data Ts-Tm relationship.')
# Tmul = mda.sel(parameter='slope').values.item()
# Toff = mda.sel(parameter='intercept').values.item()
m = mda.sel(name=model_name).values.item()
kappa_ds, kappa_err = kappa_ml(Tds, model=m, slope_err=mda.attrs['LR_whole_stderr_slope'])
ipw = kappa_ds * zwd
ipw_error = kappa_ds * zwd_error + zwd * kappa_err
ipw_error.name = 'PW_error'
ipw_error.attrs['long_name'] = 'Precipitable Water standard error'
ipw_error.attrs['units'] = 'mm'
ipw.name = 'PW'
ipw.attrs['long_name'] = 'Precipitable Water'
ipw.attrs['units'] = 'mm'
ipw = ipw.to_dataset(name='PW')
ipw['PW_error'] = ipw_error
ipw.attrs['description'] = 'whole data Tm formulation using {} model'.format(
model_name)
print('Done!')
return ipw
elif len(mda.dims) == 2 and hours is not None:
print('Found hourly Ts-Tm relationship slice.')
kappa_list = []
kappa_err_list = []
h_key = [x for x in hours.keys()][0]
for hr_num in [x for x in hours.values()][0]:
print('working on hour {}'.format(hr_num))
sliced = Tds.where(Tds[h_key] == hr_num).dropna(time_dim)
m = mda.sel({'name': model_name, h_key: hr_num}).values.item()
kappa_part, kappa_err = kappa_ml(sliced, model=m)
kappa_list.append(kappa_part)
kappa_err_list.append(kappa_err)
des_attrs = 'hourly data Tm formulation using {} model'.format(
model_name)
elif len(mda.dims) == 2 and seasons is not None:
print('Found season Ts-Tm relationship slice.')
kappa_list = []
kappa_err_list = []
s_key = [x for x in seasons.keys()][0]
for season in [x for x in seasons.values()][0]:
print('working on season {}'.format(season))
sliced = Tds.where(Tds[s_key] == season).dropna(time_dim)
m = mda.sel({'name': model_name, s_key: season}).values.item()
kappa_part, kappa_err = kappa_ml(sliced, model=m)
kappa_list.append(kappa_part)
kappa_err_list.append(kappa_err)
des_attrs = 'seasonal data Tm formulation using {} model'.format(
model_name)
elif len(mda.dims) == 2 and set(mda.dims) == set(['any_cld', 'name']):
print('Found clouds Ts-Tm relationship slice.')
elif (len(mda.dims) == 3 and set(mda.dims) ==
set(['any_cld', 'season', 'name'])):
print('Found clouds and seasonal Ts-Tm relationship slice.')
elif (len(mda.dims) == 3 and set(mda.dims) ==
set(['any_cld', 'hour', 'name'])):
print('Found clouds and hour Ts-Tm relationship slice.')
# no way to find clouds in historical data ??
# NOTE: legacy branch - several names used below (mda_list, mda_vals, Tmul,
# Toff, k2, k3, season) are undefined here, so this code path is effectively
# unsupported as written.
kappa_list = []
# mda_list = []
# mda_vals = []
for hr_num in hours.keys():
for any_cld in any_clds:
print('working on any_cld {}, hour {}'.format(
any_cld, hours[hr_num]))
# Tmul = models.sel(any_cld=any_cld, hour=hours[hr_num],
# parameter='slope')
# Toff = models.sel(any_cld=any_cld, hour=hours[hr_num],
# parameter='intercept')
sliced = Tds.where(Tds['time.season'] == season).dropna(
'time').where(Tds['time.hour'] == hr_num).dropna('time')
m = mda.sel(any_cld=any_cld, hour=hours[hr_num],
name=model_name)
kappa_part = kappa_ml(sliced, model=m)
kappa_keys = ['T_multiplier', 'T_offset', 'k2', 'k3']
kappa_keys = [x + '_' + season + '_' + hours[hr_num] for x in
kappa_keys]
mda_list.append(kappa_keys)
mda_vals.append([Tmul.values.item(), Toff.values.item(),
k2, k3])
kappa_list.append(kappa_part)
elif (len(mda.dims) == 3 and seasons is not None and hours is not None):
print('Found hourly and seasonal Ts-Tm relationship slice.')
kappa_list = []
kappa_err_list = []
h_key = [x for x in hours.keys()][0]
s_key = [x for x in seasons.keys()][0]
for hr_num in [x for x in hours.values()][0]:
for season in [x for x in seasons.values()][0]:
print('working on season {}, hour {}'.format(
season, hr_num))
sliced = Tds.where(Tds[s_key] == season).dropna(
time_dim).where(Tds[h_key] == hr_num).dropna(time_dim)
m = mda.sel({'name': model_name, s_key: season,
h_key: hr_num}).values.item()
kappa_part, kappa_err = kappa_ml(sliced, model=m)
kappa_list.append(kappa_part)
kappa_err_list.append(kappa_err)
des_attrs = 'hourly and seasonal data Tm formulation using {} model'.format(model_name)
kappa_ds = xr.concat(kappa_list, time_dim)
kappa_err_ds = xr.concat(kappa_err_list, time_dim)
ipw = kappa_ds * zwd
ipw_error = kappa_ds * zwd_error + zwd * kappa_err_ds
ipw_error.name = 'PW_error'
ipw_error.attrs['long_name'] = 'Precipitable Water standard error'
ipw_error.attrs['units'] = 'kg / m^2'
ipw.name = 'PW'
ipw.attrs['long_name'] = 'Precipitable Water'
ipw.attrs['units'] = 'kg / m^2'
ipw = ipw.to_dataset(name='PW')
ipw['PW_error'] = ipw_error
ipw.attrs['description'] = des_attrs
print('Done!')
ipw = ipw.reset_coords(drop=True)
return ipw
def produce_IPW_field(geo_df, ims_path=ims_path, gps_path=garner_path,
savepath=None, lapse_rate=6.5, Tmul=0.72,
T_offset=70.2, k2=22.1, k3=3.776e5, station=None,
plot=True, hist=True):
import xarray as xr
"""produce IPW field from zwd and T, for one station or all stations"""
# IPW = kappa[kg/m^3] * ZWD[cm]
print('fixing T data for height differences with {} K/km lapse rate'.format(
lapse_rate))
Tds = fix_T_height(ims_path, geo_df, lapse_rate)
print(
'producing kappa multiplier to T data with k2: {}, and k3: {}.'.format(
k2,
k3))
Tds = kappa(Tds, Tmul, T_offset, k2, k3)
kappa_dict = dict(zip(['T_multiplier', 'T_offset', 'k2', 'k3'],
[Tmul, T_offset, k2, k3]))
garner_zwd = xr.open_dataset(gps_path /
'garner_israeli_stations_filtered.nc')
if station is not None:
print('producing IPW field for station: {}'.format(station))
try:
ipw = Tds[station] * garner_zwd[station.upper()]
ipw.name = station.upper()
ipw.attrs['gps_lat'] = geo_df.loc[station, 'gps_lat']
ipw.attrs['gps_lon'] = geo_df.loc[station, 'gps_lon']
ipw.attrs['gps_alt'] = geo_df.loc[station, 'gps_alt']
for k, v in kappa_dict.items():
ipw.attrs[k] = v
except KeyError:
raise KeyError('{} station not found in garner gps data'.format(station))
ds = ipw.to_dataset(name=ipw.name)
ds = ds.rename({'zwd': 'ipw'})
ds['ipw'].attrs['name'] = 'IPW'
ds['ipw'].attrs['long_name'] = 'Integrated Precipitable Water'
ds['ipw'].attrs['units'] = 'kg / m^2'
print('Done!')
else:
print('producing IPW fields:')
ipw_list = []
for st in Tds:
try:
# IPW = kappa(T) * Zenith Wet Delay:
ipw = Tds[st] * garner_zwd[st.upper()]
ipw.name = st.upper()
ipw.attrs['gps_lat'] = geo_df.loc[st, 'gps_lat']
ipw.attrs['gps_lon'] = geo_df.loc[st, 'gps_lon']
ipw.attrs['gps_alt'] = geo_df.loc[st, 'gps_alt']
for k, v in kappa_dict.items():
ipw.attrs[k] = v
ipw_list.append(ipw)
except KeyError:
print('{} station not found in garner gps data'.format(st))
continue
ds = xr.merge(ipw_list)
ds = ds.rename({'zwd': 'ipw'})
ds['ipw'].attrs['name'] = 'IPW'
ds['ipw'].attrs['long_name'] = 'Integrated Precipitable Water'
ds['ipw'].attrs['units'] = 'kg / m^2'
print('Done!')
if savepath is not None:
filename = 'IPW_israeli_from_gps.nc'
print('saving {} to {}'.format(filename, savepath))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
ds.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
if plot:
ds.sel(ipw='value').to_array(dim='station').sortby('station').plot(
x='time',
col='station',
col_wrap=4,
figsize=(15, 8))
if hist:
ds.sel(ipw='value').to_dataframe().hist(bins=100, grid=False,
figsize=(15, 8))
return ds
def check_Tm_func(Tmul_num=10, Ts_num=6, Toff_num=15):
""" check and plot Tm function to understand which bounds to put on Tmul
Toff optimization, found:Tmul (0,1), Toff (0,150)"""
import xarray as xr
Ts = np.linspace(-10, 50, Ts_num) + 273.15
Toff = np.linspace(-300, 300, Toff_num)
Tmul = np.linspace(-3, 3, Tmul_num)
Tm = np.empty((Ts_num, Tmul_num, Toff_num))
for i in range(Ts_num):
for j in range(Tmul_num):
for k in range(Toff_num):
Tm[i, j, k] = Ts[i] * Tmul[j] + Toff[k]
da = xr.DataArray(Tm, dims=['Ts', 'Tmul', 'Toff'])
da['Ts'] = Ts
da['Tmul'] = Tmul
da['Toff'] = Toff
da.plot.pcolormesh(col='Ts', col_wrap=3)
return da
def kappa_ml(T, model=None, k2=22.1, k3=3.776e5, dk3=0.004e5, dk2=2.2,
verbose=False, no_error=False, slope_err=None):
"""T in celsious, anton says k2=22.1 is better, """
import numpy as np
import xarray as xr
time_dim = list(set(T.dims))[0]
# maybe implemment Tm= linear_fit(Ts_clim, Tm_clim) + linear_fit(Ts_anom, Tm_anom)
# from sklearn.utils.estimator_checks import check_estimator
# original k2=17.0 bevis 1992 etal.
# [k2] = K / mbar, [k3] = K^2 / mbar
# 100 Pa = 1 mbar
dT = 0.5 # deg_C
if model is None:
if verbose:
print('Bevis 1992-1994 model selected.')
Tm = (273.15 + T) * 0.72 + 70.0 # K Bevis 1992 model
dTm = 0.72 * dT
elif isinstance(model, dict):
if verbose:
print(
'using linear model of Tm = {} * Ts + {}'.format(model['coef'], model['intercept']))
Tm = (273.15 + T) * model['coef'] + model['intercept']
dTm = model['coef'] * dT
elif isinstance(model, np.ndarray) and model.ndim == 2:
print('using model arg as 2d np array with dims: [coef, intercept]')
coef = model[0, :]
intercept = model[1, :]
tm = np.empty((T.values.shape[0], coef.shape[0], intercept.shape[0]))
for i in range(coef.shape[0]):
for j in range(intercept.shape[0]):
tm[:, i, j] = (273.15 + T.values) * coef[i] + intercept[j]
Tm = xr.DataArray(tm, dims=[time_dim, 'coef', 'intercept'])
Tm[time_dim] = T[time_dim]
Tm['coef'] = coef
Tm['intercept'] = intercept
else:
if verbose:
print('Using sklearn model of: {}'.format(model))
if hasattr(model, 'coef_'):
print(
'with coef: {} and intercept: {}'.format(
model.coef_[0],
model.intercept_))
# Tm = T.copy(deep=False)
Tnp = T.dropna(time_dim).values.reshape(-1, 1)
# T = T.values.reshape(-1, 1)
Tm = T.dropna(time_dim).copy(deep=False,
data=model.predict((273.15 + Tnp)))
Tm = Tm.reindex({time_dim: T[time_dim]})
if slope_err is not None:
dTm = model.coef_[0] * dT + slope_err * Tm
else:
dTm = model.coef_[0] * dT
# Tm = model.predict((273.15 + T))
Rv = 461.52 # [Rv] = J / (kg * K) = (Pa * m^3) / (kg * K)
# (1e-2 mbar * m^3) / (kg * K)
k = 1e-6 * (k3 / Tm + k2) * Rv
k = 1.0 / k # [k] = 100 * kg / m^3 = kg/ (m^2 * cm)
# dk = (1e6 / Rv ) * (k3 / Tm + k2)**-2 * (dk3 / Tm + dTm * k3 / Tm**2.0 + dk2)
# dk = k * np.sqrt(dk3Tm**2.0 + dk2**2.0)
if no_error:
return k
else:
dk = k * (k3 / Tm + k2)**-1 * np.sqrt((dk3 / Tm) **
2.0 + (dTm * k3 / Tm**2.0)**2.0 + dk2**2.0)
# 1 kg/m^2 IPW = 1 mm PW
return k, dk
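# A minimal sketch of calling kappa_ml (defined above) with an explicit linear
# Ts-Tm model dict; the Bevis-like coefficients and the synthetic temperatures
# below are for illustration only.
def _example_kappa_ml_linear_model():
    import numpy as np
    import xarray as xr
    T = xr.DataArray(np.array([15.0, 20.0, 25.0]), dims=['time'], name='ts')  # deg C
    k, dk = kappa_ml(T, model={'coef': 0.72, 'intercept': 70.0}, verbose=True)
    return k, dk  # kappa in kg/(m^2 * cm) and its estimated error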
def kappa(T, Tmul=0.72, T_offset=70.2, k2=22.1, k3=3.776e5, Tm_input=False):
"""T in celsious, or in K when Tm_input is True"""
# original k2=17.0 bevis 1992 etal.
# [k2] = K / mbar, [k3] = K^2 / mbar
# 100 Pa = 1 mbar
if not Tm_input:
Tm = (273.15 + T) * Tmul + T_offset # K
else:
Tm = T
Rv = 461.52 # [Rv] = J / (kg * K) = (Pa * m^3) / (kg * K)
# (1e-2 mbar * m^3) / (kg * K)
k = 1e-6 * (k3 / Tm + k2) * Rv
k = 1.0 / k # [k] = 100 * kg / m^3 = kg/ (m^2 * cm)
# 1 kg/m^2 IPW = 1 mm PW
return k
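# A quick numerical sanity check of kappa with the defaults above: for
# Ts = 25 deg C, Tm = 298.15 * 0.72 + 70.2 ~ 284.9 K and kappa ~ 1.6
# kg/(m^2 * cm), i.e. 20 cm of ZWD maps to roughly 32 mm of PW.
def _example_kappa_sanity_check():
    k = kappa(25.0)
    return k, 20.0 * k  # (~1.61, ~32 mm PW for 20 cm of ZWD)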
def calculate_ZHD(pressure, lat=30.0, ht_km=0.5,
pressure_station_height_km=None):
import numpy as np
import xarray as xr
lat_rad = np.deg2rad(lat)
if pressure_station_height_km is not None:
# adjust pressure according to the pressure lapse rate taken empirically
# from IMS stations and the pressure station height in km:
plr_km_hPa = -112.653 # hPa / km
height_diff_km = ht_km - pressure_station_height_km
pressure += plr_km_hPa * height_diff_km
ZHD = 0.22794 * pressure / \
(1 - 0.00266 * np.cos(2 * lat_rad) - 0.00028 * ht_km)
if not isinstance(ZHD, xr.DataArray):
ZHD = xr.DataArray(ZHD, dims=['time'])
ZHD.name = 'ZHD'
ZHD.attrs['units'] = 'cm'
ZHD.attrs['long_name'] = 'Zenith Hydrostatic Delay'
return ZHD
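# A minimal sketch of calculate_ZHD (defined above) on a tiny synthetic
# surface-pressure series; for ~1013 hPa at 32 deg latitude and 50 m height
# the hydrostatic delay comes out around 231 cm (~2.3 m). Values are
# illustrative only.
def _example_calculate_zhd():
    import xarray as xr
    pressure = xr.DataArray([1005.0, 1010.0, 1013.25], dims=['time'])  # hPa
    return calculate_ZHD(pressure, lat=32.0, ht_km=0.05)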
def minimize_kappa_tela_sound(sound_path=sound_path, gps=garner_path,
ims_path=ims_path, station='TELA', bounds=None,
x0=None, times=None, season=None):
from skopt import gp_minimize
import xarray as xr
from sklearn.metrics import mean_squared_error
import numpy as np
from aux_gps import dim_intersection
def func_to_min(x):
Tmul = x[0]
Toff = x[1]
# k2 = x[2]
# Ta = Tmul * (Ts + 273.15) + Toff
Ts_k = Ts + 273.15
Ta = Tmul * (Ts_k) + Toff
added_loss = np.mean((np.where(Ta > Ts_k, 1.0, 0.0))) * 100.0
k = kappa(Ts, Tmul=Tmul, T_offset=Toff) # , k2=k2)
res = sound - k * zwd_gps
rmse = np.sqrt(mean_squared_error(sound, k * zwd_gps))
loss = np.abs(np.mean(res)) + rmse
print('loss:{}, added_loss:{}'.format(loss, added_loss))
loss += added_loss
return loss
# load gerner zwd data:
zwd_gps = xr.open_dataset(gps / 'garner_israeli_stations_filtered.nc')
zwd_gps = zwd_gps[station].sel(zwd='value')
zwd_gps.load()
# load bet dagan sounding data:
sound = xr.open_dataarray(sound_path / 'PW_bet_dagan_soundings.nc')
sound = sound.where(sound > 0, drop=True)
sound.load()
# load surface temperature data in C:
Tds = xr.open_dataset(ims_path / 'IMS_TD_israeli_for_gps.nc')
Ts = Tds[station.lower()]
Ts.load()
# intersect the datetimes:
new_time = dim_intersection([zwd_gps, sound, Ts], 'time')
zwd_gps = zwd_gps.sel(time=new_time)
sound = sound.sel(time=new_time)
Ts = Ts.sel(time=new_time)
if times is not None:
zwd_gps = zwd_gps.sel(time=slice(times[0], times[1]))
sound = sound.sel(time=slice(times[0], times[1]))
Ts = Ts.sel(time=slice(times[0], times[1]))
if season is not None:
print('Minimizing for season : {}'.format(season))
zwd_gps = zwd_gps.sel(time=zwd_gps['time.season'] == season)
sound = sound.sel(time=sound['time.season'] == season)
Ts = Ts.sel(time=Ts['time.season'] == season)
zwd_gps = zwd_gps.values
sound = sound.values
Ts = Ts.values
if bounds is None:
# default boundaries:
bounds = {}
bounds['Tmul'] = (0.1, 1.0)
bounds['Toff'] = (0.0, 110.0)
# bounds['k2'] = (1.0, 150.0)
if x0 is None:
# default x0
x0 = {}
x0['Tmul'] = 0.5
x0['Toff'] = 90.0
# x0['k2'] = 17.0
if isinstance(x0, dict):
x0_list = [x0.get('Tmul'), x0.get('Toff')] # , x0.get('k2')]
print('Running minimization with initial X:')
for k, v in x0.items():
print(k + ': ', v)
if not x0:
x0_list = None
print('Running minimization with NO initial X...')
print('Running minimization with the following bounds:')
for k, v in bounds.items():
print(k + ': ', v)
bounds_list = [bounds.get('Tmul'), bounds.get('Toff')] # , bounds.get('k2')]
res = gp_minimize(func_to_min, dimensions=bounds_list,
x0=x0_list, n_jobs=-1, random_state=42,
verbose=False)
return res
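# A toy sketch of the scikit-optimize call used in minimize_kappa_tela_sound
# above, with a synthetic quadratic loss instead of the sounding-based one;
# it only needs the optional skopt dependency and no data files.
def _example_gp_minimize_toy():
    from skopt import gp_minimize
    def loss(x):
        tmul, toff = x
        return (tmul - 0.72) ** 2 + ((toff - 70.0) / 100.0) ** 2
    res = gp_minimize(loss, dimensions=[(0.1, 1.0), (0.0, 110.0)],
                      x0=[0.5, 90.0], n_calls=15, random_state=42)
    return res.x, res.fun  # best (Tmul, Toff) and its loss value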
def read_zwd_from_tdp_final(tdp_path, st_name='TELA', scatter_plot=True):
import pandas as pd
from pandas.errors import EmptyDataError
from aux_gps import get_unique_index
import matplotlib.pyplot as plt
df_list = []
for file in sorted(tdp_path.glob('*.txt')):
just_date = file.as_posix().split('/')[-1].split('.')[0]
dt = pd.to_datetime(just_date)
try:
df = pd.read_csv(file, index_col=0, delim_whitespace=True,
header=None)
df.columns = ['zwd']
df.index = dt + pd.to_timedelta(df.index * 60, unit='min')
df_list.append(df)
except EmptyDataError:
print('found empty file...')
continue
df_all = pd.concat(df_list)
df_all = df_all.sort_index()
df_all.index.name = 'time'
ds = df_all.to_xarray()
ds = ds.rename({'zwd': st_name})
ds = get_unique_index(ds)
ds[st_name] = ds[st_name].where(ds[st_name] > 0, drop=True)
if scatter_plot:
ds[st_name].plot.line(marker='.', linewidth=0.)
# plt.scatter(x=ds.time.values, y=ds.TELA.values, marker='.', s=10)
return ds
def read_rnx_headers(path=work_yuval/'rnx_headers', station='tela'):
from aux_gps import path_glob
import pandas as pd
file = path_glob(path, '{}_rnxheaders.csv'.format(station))[0]
df = pd.read_csv(file, header=0, index_col='nameDateStr')
df = df.sort_index()
df = df.drop('Unnamed: 0', axis=1)
return df
def check_anton_tela_station(anton_path, ims_path=ims_path, plot=True):
import pandas as pd
from datetime import datetime, timedelta
from pandas.errors import EmptyDataError
import matplotlib.pyplot as plt
import xarray as xr
df_list = []
for file in anton_path.glob('tela*.txt'):
day = int(''.join([x for x in file.as_posix() if x.isdigit()]))
year = 2015
dt = pd.to_datetime(datetime(year, 1, 1) + timedelta(day - 1))
try:
df = pd.read_csv(file, index_col=0, delim_whitespace=True,
header=None)
df.columns = ['zwd']
df.index = dt + pd.to_timedelta(df.index * 60, unit='min')
df_list.append(df)
except EmptyDataError:
print('found empty file...')
continue
df_all = pd.concat(df_list)
df_all = df_all.sort_index()
df_all.index.name = 'time'
ds = df_all.to_xarray()
ds = ds.rename({'zwd': 'TELA'})
new_time = pd.date_range(pd.to_datetime(ds.time.min().values),
pd.to_datetime(ds.time.max().values), freq='5min')
ds = ds.reindex(time=new_time)
if plot:
ds['TELA'].plot.line(marker='.', linewidth=0.)
# plt.scatter(x=ds.time.values, y=ds.TELA.values, marker='.', s=10)
# Tds = xr.open_dataset(ims_path / 'IMS_TD_israeli_for_gps.nc')
# k = kappa(Tds.tela, k2=22.1)
# ds = k * ds
return ds
def from_opt_to_comparison(result=None, times=None, bounds=None, x0=None,
season=None, Tmul=None, T_offset=None):
""" call optimization and comapring alltogather. can run optimization
separetly and plugin the result to compare"""
if result is None:
print('minimizing the hell out of the function!...')
result = minimize_kappa_tela_sound(times=times, bounds=bounds, x0=x0,
season=season)
geo_df = produce_geo_df()
if result:
Tmul = result.x[0]
T_offset = result.x[1]
if Tmul is not None and T_offset is not None:
# k2 = result.x[2]
ipw = produce_IPW_field(geo_df, Tmul=Tmul, T_offset=T_offset,
plot=False, hist=False, station='tela')
pw = compare_to_sounding(gps=ipw, times=times, season=season)
pw.attrs['result from fitted model'] = result.x
return pw, result
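# Example (sketch): fit the Ts->Tm model on winter data only and compare the
# resulting IPW field against the Bet-Dagan soundings; both return values come
# straight from `from_opt_to_comparison` above.
def example_winter_opt_comparison():
    pw, result = from_opt_to_comparison(season='DJF')
    print('fitted [Tmul, Toff]: {}'.format(result.x))
    return pw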
def compare_to_sounding2(pw_from_gps, pw_from_sounding, station='TELA',
times=None, season=None, hour=None, title=None):
import matplotlib.pyplot as plt
import seaborn as sns
from aux_gps import get_unique_index
from sklearn.metrics import mean_squared_error
time_dim_gps = list(set(pw_from_gps.dims))[0]
time_dim_sound = list(set(pw_from_sounding.dims))[0]
# sns.set_style('darkgrid')
pw = pw_from_gps.to_dataset(name=station).reset_coords(drop=True)
pw = pw.dropna(time_dim_gps)
pw = get_unique_index(pw, time_dim_gps)
pw_sound = pw_from_sounding.dropna(time_dim_sound)
pw['sound'] = get_unique_index(pw_sound, time_dim_sound)
pw['resid'] = pw['sound'] - pw[station]
time_dim = list(set(pw.dims))[0]
if time_dim != 'time':
pw = pw.rename({time_dim: 'time'})
if times is not None:
pw = pw.sel(time=slice(times[0], times[1]))
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
if hour is not None:
pw = pw.sel(time=pw['time.hour'] == hour)
if title is None:
sup = 'TPW is created using Bevis Tm formulation'
if title is not None:
if title == 'hour':
sup = 'TPW for {} is created using empirical hourly Tm segmentation and formulation'.format(station)
elif title == 'season':
            sup = 'TPW for {} is created using empirical seasonal Tm segmentation and formulation'.format(station)
elif title == 'whole':
sup = 'TPW for {} is created using whole empirical Tm formulation'.format(station)
elif title == 'hour_season':
            sup = 'TPW for {} is created using empirical seasonal and hourly Tm segmentation and formulation'.format(station)
fig, ax = plt.subplots(1, 2, figsize=(20, 4),
gridspec_kw={'width_ratios': [3, 1]})
ax[0].set_title(sup)
pw[[station, 'sound']].to_dataframe().plot(ax=ax[0], style='.')
sns.distplot(
pw['resid'].values,
bins=100,
color='c',
label='residuals',
ax=ax[1])
# pw['resid'].plot.hist(bins=100, color='c', edgecolor='k', alpha=0.65,
# ax=ax[1])
rmean = pw['resid'].mean().values
rstd = pw['resid'].std().values
rmedian = pw['resid'].median().values
rmse = np.sqrt(mean_squared_error(pw['sound'], pw[station]))
plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# plt.axvline(rmedian, color='b', linestyle='dashed', linewidth=1)
_, max_ = plt.ylim()
plt.text(rmean + rmean / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
fig.tight_layout()
if season is None:
pw['season'] = pw['time.season']
pw['hour'] = pw['time.hour'].astype(str)
pw['hour'] = pw.hour.where(pw.hour != '12', 'noon')
pw['hour'] = pw.hour.where(pw.hour != '0', 'midnight')
df = pw.to_dataframe()
# g = sns.relplot(
# data=df,
# x='sound',
# y='TELA',
# col='season',
# hue='hour',
# kind='scatter',
# style='season')
# if times is not None:
# plt.subplots_adjust(top=0.85)
# g.fig.suptitle('Time: ' + times[0] + ' to ' + times[1], y=0.98)
h_order = ['noon', 'midnight']
s_order = ['DJF', 'JJA', 'SON', 'MAM']
g = sns.lmplot(
data=df,
x='sound',
y=station,
col='season',
hue='season',
row='hour',
row_order=h_order,
col_order=s_order)
g.set(ylim=(0, 50), xlim=(0, 50))
if times is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle('Time: ' + times[0] + ' to ' + times[1], y=0.98)
g = sns.FacetGrid(data=df, col='season', hue='season', row='hour',
row_order=h_order, col_order=s_order)
g.fig.set_size_inches(15, 8)
g = (g.map(sns.distplot, "resid"))
rmeans = []
rmses = []
for hour in h_order:
for season in s_order:
            # select the same (hour, season) slice that is shown in the facet grid:
            sliced_pw = pw.sel(
                time=pw['time.season'] == season).where(
                pw.hour == hour).dropna('time')
rmses.append(
np.sqrt(
mean_squared_error(
sliced_pw['sound'],
sliced_pw[station])))
rmeans.append(sliced_pw['resid'].mean().values)
for i, ax in enumerate(g.axes.flat):
ax.axvline(rmeans[i], color='k', linestyle='dashed', linewidth=1)
_, max_ = ax.get_ylim()
ax.text(rmeans[i] + rmeans[i] / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmeans[i], rmses[i]))
# g.set(xlim=(-5, 5))
if times is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle('Time: ' + times[0] + ' to ' + times[1], y=0.98)
return pw
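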
def compare_to_sounding(sound_path=sound_path, gps=garner_path, station='TELA',
times=None, season=None, hour=None, title=None):
"""ipw comparison to bet-dagan sounding, gps can be the ipw dataset"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from pathlib import Path
# sns.set_style('darkgrid')
if isinstance(gps, Path):
pw_gps = xr.open_dataset(gps / 'IPW_israeli_from_gps.nc')
else:
pw_gps = gps
    if 'ipw' in pw_gps.coords:
pw_gps = pw_gps[station].sel(ipw='value')
else:
pw_gps = pw_gps[station]
pw_gps.load()
sound = xr.open_dataarray(sound_path / 'PW_bet_dagan_soundings.nc')
# drop 0 pw - not physical
sound = sound.where(sound > 0, drop=True)
sound.load()
new_time = list(set(pw_gps.dropna('time').time.values).intersection(
set(sound.dropna('time').time.values)))
new_dt = sorted(pd.to_datetime(new_time))
    # selecting the intersected datetimes can take a while:
print('selecting intersected datetime...')
pw_gps = pw_gps.sel(time=new_dt)
sound = sound.sel(time=new_dt)
pw = pw_gps.to_dataset(name=station).reset_coords(drop=True)
pw['sound'] = sound
pw['resid'] = pw['sound'] - pw[station]
pw.load()
print('Done!')
if times is not None:
pw = pw.sel(time=slice(times[0], times[1]))
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
if hour is not None:
pw = pw.sel(time=pw['time.hour'] == hour)
if title is None:
sup = 'PW is created using Bevis Tm formulation'
if title is not None:
if title == 'hour':
sup = 'PW is created using hourly Tm segmentation and formulation'
elif title == 'season':
            sup = 'PW is created using seasonal Tm segmentation and formulation'
elif title == 'whole':
sup = 'PW is created using whole Tm formulation'
elif title == 'hour_season':
            sup = 'PW is created using seasonal and hourly Tm segmentation and formulation'
fig, ax = plt.subplots(1, 2, figsize=(20, 4),
gridspec_kw={'width_ratios': [3, 1]})
fig.suptitle(sup, fontweight='bold')
pw[[station, 'sound']].to_dataframe().plot(ax=ax[0], style='.')
sns.distplot(
pw['resid'].values,
bins=100,
color='c',
label='residuals',
ax=ax[1])
# pw['resid'].plot.hist(bins=100, color='c', edgecolor='k', alpha=0.65,
# ax=ax[1])
rmean = pw['resid'].mean().values
rstd = pw['resid'].std().values
rmedian = pw['resid'].median().values
rmse = np.sqrt(mean_squared_error(pw['sound'], pw[station]))
plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# plt.axvline(rmedian, color='b', linestyle='dashed', linewidth=1)
_, max_ = plt.ylim()
plt.text(rmean + rmean / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
fig.tight_layout()
if season is None:
pw['season'] = pw['time.season']
pw['hour'] = pw['time.hour'].astype(str)
pw['hour'] = pw.hour.where(pw.hour != '12', 'noon')
pw['hour'] = pw.hour.where(pw.hour != '0', 'midnight')
df = pw.to_dataframe()
# g = sns.relplot(
# data=df,
# x='sound',
# y='TELA',
# col='season',
# hue='hour',
# kind='scatter',
# style='season')
# if times is not None:
# plt.subplots_adjust(top=0.85)
# g.fig.suptitle('Time: ' + times[0] + ' to ' + times[1], y=0.98)
h_order = ['noon', 'midnight']
s_order = ['DJF', 'JJA', 'SON', 'MAM']
g = sns.lmplot(
data=df,
x='sound',
y='TELA',
col='season',
hue='season',
row='hour',
row_order=h_order,
col_order=s_order)
g.set(ylim=(0, 50), xlim=(0, 50))
if times is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle('Time: ' + times[0] + ' to ' + times[1], y=0.98)
g = sns.FacetGrid(data=df, col='season', hue='season', row='hour',
row_order=h_order, col_order=s_order)
g.fig.set_size_inches(15, 8)
g = (g.map(sns.distplot, "resid"))
rmeans = []
rmses = []
for hour in h_order:
for season in s_order:
            # select the same (hour, season) slice that is shown in the facet grid:
            sliced_pw = pw.sel(
                time=pw['time.season'] == season).where(
                pw.hour == hour).dropna('time')
rmses.append(
np.sqrt(
mean_squared_error(
sliced_pw['sound'],
sliced_pw[station])))
rmeans.append(sliced_pw['resid'].mean().values)
for i, ax in enumerate(g.axes.flat):
ax.axvline(rmeans[i], color='k', linestyle='dashed', linewidth=1)
_, max_ = ax.get_ylim()
ax.text(rmeans[i] + rmeans[i] / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmeans[i], rmses[i]))
# g.set(xlim=(-5, 5))
if times is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle('Time: ' + times[0] + ' to ' + times[1], y=0.98)
# maybe month ?
# plt.text(rmedian + rmedian / 10, max_ - max_ / 10,
# 'Mean: {:.2f}'.format(rmedian))
return pw
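# Example (sketch): compare an already-produced IPW dataset to the Bet-Dagan
# soundings for summer noons only. `ipw_ds` is assumed to be an xarray Dataset
# holding a 'TELA' variable (e.g. the output of produce_IPW_field).
def example_compare_ipw_to_sounding(ipw_ds):
    pw = compare_to_sounding(gps=ipw_ds, station='TELA', season='JJA', hour=12)
    return pw[['TELA', 'sound', 'resid']]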
def ml_models_T_from_sounding(sound_path=sound_path, categories=None,
models=['LR', 'TSEN'], physical_file=None,
times=['2005', '2019'], station=None, plot=True):
"""calls formulate_plot to analyse and model the ts-tm connection from
radiosonde(bet-dagan). options for categories:season, hour, clouds
you can choose some ,all or none categories"""
import xarray as xr
from aux_gps import get_unique_index
from aux_gps import keep_iqr
from aux_gps import path_glob
if isinstance(models, str):
models = [models]
if physical_file is not None or station is not None:
print('Overwriting ds input...')
if not isinstance(physical_file, xr.Dataset):
if station is not None:
physical_file = path_glob(sound_path, 'station_{}_soundings_ts_tm_tpw*.nc'.format(station))[0]
print('station {} selected and loaded.'.format(station))
pds = xr.open_dataset(physical_file)
else:
pds = physical_file
time_dim = list(set(pds.dims))[0]
pds = pds[['Tm', 'Ts']]
pds = pds.rename({'Ts': 'ts', 'Tm': 'tm'})
# pds = pds.rename({'sound_time': 'time'})
pds = get_unique_index(pds, dim=time_dim)
pds = pds.map(keep_iqr, k=2.0, dim=time_dim, keep_attrs=True)
ds = pds.dropna(time_dim)
    else:
        ds = xr.open_dataset(sound_path /
                             'bet_dagan_sounding_pw_Ts_Tk_with_clouds.nc')
        ds = ds.reset_coords(drop=True)
        # time_dim is needed in this branch as well (used for slicing and attrs):
        time_dim = list(set(ds.dims))[0]
if times is not None:
ds = ds.sel({time_dim: slice(*times)})
# define the possible categories and feed their dictionary:
possible_cats = ['season', 'hour']
pos_cats_dict = {}
s_order = ['DJF', 'JJA', 'SON', 'MAM']
h_order = [12, 0]
cld_order = [0, 1]
if 'season' in possible_cats:
pos_cats_dict['{}.season'.format(time_dim)] = s_order
if 'hour' in possible_cats:
pos_cats_dict['{}.hour'.format(time_dim)] = h_order
if categories is None:
results = formulate_plot(ds, model_names=models, plot=plot)
if categories is not None:
if not isinstance(categories, list):
categories = [categories]
if set(categories + possible_cats) != set(possible_cats):
raise ValueError(
'choices for categories are: ' +
', '.join(possible_cats))
categories = [x.replace(x, time_dim + '.' + x) if x ==
'season' or x == 'hour' else x for x in categories]
results = formulate_plot(ds, pos_cats_dict=pos_cats_dict,
chosen_cats=categories, model_names=models,
plot=plot)
results.attrs['time_dim'] = time_dim
return results
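# Example (sketch): train LR and Theil-Sen Ts->Tm models per season and pull the
# winter LR coefficients out of the returned DataArray of fitted estimators.
# The dotted dimension name mirrors how formulate_plot stores the category.
def example_seasonal_tm_models():
    models = ml_models_T_from_sounding(categories='season',
                                       models=['LR', 'TSEN'], plot=False)
    time_dim = models.attrs['time_dim']
    lr_djf = models.sel({'{}.season'.format(time_dim): 'DJF', 'name': 'LR'}).item()
    print('DJF LR: a={:.3f}, b={:.2f}'.format(lr_djf.coef_[0], lr_djf.intercept_))
    return models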
#def linear_T_from_sounding(sound_path=sound_path, categories=None):
# import xarray as xr
# ds = xr.open_dataset(sound_path / 'bet_dagan_sounding_pw_Ts_Tk_with_clouds.nc')
# ds = ds.reset_coords(drop=True)
# s_order = ['DJF', 'JJA', 'SON', 'MAM']
# h_order = ['noon', 'midnight']
# cld_order = [0, 1]
# if categories is None:
# results = formulate_plot(ds)
# if categories is not None:
# if not isinstance(categories, list):
# categories = [categories]
# if set(categories + ['season', 'hour', 'clouds']) != set(['season',
# 'hour',
# 'clouds']):
# raise ValueError('choices for categories are: season, hour, clouds')
# if len(categories) == 1:
# if 'season' in categories:
# dd = {'season': s_order}
# elif 'hour' in categories:
# dd = {'hour': h_order}
# elif 'clouds' in categories:
# dd = {'any_cld': cld_order}
# elif len(categories) == 2:
# if 'season' in categories and 'hour' in categories:
# dd = {'hour': h_order, 'season': s_order}
# elif 'season' in categories and 'clouds' in categories:
# dd = {'any_cld': cld_order, 'season': s_order}
# elif 'clouds' in categories and 'hour' in categories:
# dd = {'hour': h_order, 'any_cld': cld_order}
# elif len(categories) == 3:
# if 'season' in categories and 'hour' in categories and 'clouds' in categories:
# dd = {'hour': h_order, 'any_cld': cld_order, 'season': s_order}
# results = formulate_plot(ds, dd)
# return results
def formulate_plot(ds, model_names=['LR', 'TSEN'],
pos_cats_dict=None, chosen_cats=None, plot=True):
"""accepts pos_cat (dict) with keys : hour, season ,and appropriate
values, and chosen keys and returns trained sklearn models with
the same slices.
this function is called by 'ml_models_T_from_sounding' above."""
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from aux_gps import standard_error_slope
time_dim = list(set(ds.dims))[0]
print('time dim is: {}'.format(time_dim))
# sns.set_style('darkgrid')
colors = ['red', 'green', 'magenta', 'cyan', 'orange', 'teal',
'gray', 'purple']
pos = np.linspace(0.95, 0.6, 8)
# if res_save not in model_names:
# raise KeyError('saved result should me in model names!')
if len(model_names) > len(colors):
raise ValueError(
            'Cannot support more than {} models simultaneously!'.format(
len(colors)))
ml = ML_Switcher()
models = [ml.pick_model(x) for x in model_names]
if chosen_cats is None:
print('no categories selected, using full data.')
if plot:
fig, axes = plt.subplots(1, 2, figsize=(10, 7))
fig.suptitle(
                'Bet Dagan WV-weighted mean atmospheric temperature (Tm) vs. surface temperature (Ts)', fontweight='bold')
X = ds.ts.values.reshape(-1, 1)
y = ds.tm.values
[model.fit(X, y) for model in models]
predict = [model.predict(X) for model in models]
coefs = [model.coef_[0] for model in models]
inters = [model.intercept_ for model in models]
# [a, b] = np.polyfit(ds.ts.values, ds.tm.values, 1)
# result = np.empty((2))
# result[0] = a
# result[1] = b
# sns.regplot(ds.ts.values, ds.tm.values, ax=axes[0])
df = ds.ts.dropna(time_dim).to_dataframe()
df['tm'] = ds.tm.dropna(time_dim)
try:
df['clouds'] = ds.any_cld.dropna(time_dim)
hue = 'clouds'
except AttributeError:
hue = None
pass
if plot:
g = sns.scatterplot(
data=df,
x='ts',
y='tm',
hue=hue,
marker='.',
s=100, linewidth=0, alpha=0.5,
ax=axes[0])
g.legend(loc='best')
# axes[0].scatter(x=ds.ts.values, y=ds.tm.values, marker='.', s=10)
# linex = np.array([ds.ts.min().item(), ds.ts.max().item()])
# liney = a * linex + b
# axes[0].plot(linex, liney, c='r')
bevis_tm = ds.ts.values * 0.72 + 70.0
if plot:
# plot bevis:
axes[0].plot(ds.ts.values, bevis_tm, c='purple')
min_, max_ = axes[0].get_ylim()
[axes[0].plot(X, newy, c=colors[i]) for i, newy in enumerate(predict)]
[axes[0].text(0.01, pos[i],
'{} a: {:.2f}, b: {:.2f}'.format(model_names[i],
coefs[i], inters[i]),
transform=axes[0].transAxes, color=colors[i],
fontsize=12) for i in range(len(coefs))]
axes[0].text(0.01, 0.8,
'Bevis 1992 et al. a: 0.72, b: 70.0',
transform=axes[0].transAxes, color='purple',
fontsize=12)
# axes[0].text(0.01, 0.9, 'a: {:.2f}, b: {:.2f}'.format(a, b),
# transform=axes[0].transAxes, color='black', fontsize=12)
axes[0].text(0.1, 0.85, 'n={}'.format(ds.ts.size),
verticalalignment='top', horizontalalignment='center',
transform=axes[0].transAxes, color='blue', fontsize=12)
axes[0].set_xlabel('Ts [K]')
axes[0].set_ylabel('Tm [K]')
# resid = ds.tm.values - ds.ts.values * a - b
resid = predict[0] - y
if plot:
sns.distplot(resid, bins=25, color='c', label='residuals', ax=axes[1])
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(predict[0], y))
if plot:
_, max_ = axes[1].get_ylim()
axes[1].text(rmean + rmean / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
axes[1].axvline(rmean, color='r', linestyle='dashed', linewidth=1)
axes[1].set_xlabel('Residuals [K]')
fig.tight_layout()
da = xr.DataArray(models, dims=['name'])
da['name'] = model_names
da.name = 'all_data_trained_models'
# results = xr.DataArray(result, dims=['parameter'])
# results['parameter'] = ['slope', 'intercept']
elif chosen_cats is not None:
size = len(chosen_cats)
if size == 1:
key = chosen_cats[0]
vals = pos_cats_dict[key]
print('{} category selected.'.format(key))
# other_keys = [
# *set([x for x in pos_cats_dict.keys()]).difference([key])]
# other_keys = [
# *set(['any_cld', 'hour', 'season']).difference([key])]
# result = np.empty((len(vals), 2))
# residuals = []
# rmses = []
trained = []
if plot:
fig, axes = plt.subplots(1, len(vals), sharey=True, sharex=True,
figsize=(15, 8))
fig.suptitle(
                    'Bet Dagan WV-weighted mean atmospheric temperature (Tm) vs. surface temperature (Ts) using {} selection criteria'.format(key.split('.')[-1]), fontweight='bold', x=0.5, y=1.0)
for i, val in enumerate(vals):
ts = ds.ts.where(ds[key] == val).dropna(time_dim)
tm = ds.tm.where(ds[key] == val).dropna(time_dim)
# other_val0 = ds[other_keys[0]].where(
# ds[key] == val).dropna(time_dim)
# other_val1 = ds[other_keys[1]].where(
# ds[key] == val).dropna(time_dim)
X = ts.values.reshape(-1, 1)
y = tm.values
models = [ml.pick_model(x) for x in model_names]
[model.fit(X, y) for model in models]
predict = [model.predict(X) for model in models]
coefs = [model.coef_[0] for model in models]
inters = [model.intercept_ for model in models]
# [tmul, toff] = np.polyfit(x.values, y.values, 1)
# result[i, 0] = tmul
# result[i, 1] = toff
# new_tm = tmul * x.values + toff
# resid = new_tm - y.values
# rmses.append(np.sqrt(mean_squared_error(y.values, new_tm)))
# residuals.append(resid)
if plot:
axes[i].text(0.15, 0.85, 'n={}'.format(ts.size),
verticalalignment='top',
horizontalalignment='center',
transform=axes[i].transAxes, color='blue',
fontsize=12)
df = ts.to_dataframe()
df['tm'] = tm
# df[other_keys[0]] = other_val0
# df[other_keys[1]] = other_val1
# g = sns.scatterplot(data=df, x='ts', y='tm', marker='.', s=100,
# ax=axes[i], hue=other_keys[0],
# style=other_keys[1])
if plot:
g = sns.scatterplot(data=df, x='ts', y='tm', marker='.', s=100,
ax=axes[i], linewidth=0, alpha=0.5)
g.legend(loc='upper right')
# axes[i, j].scatter(x=x.values, y=y.values, marker='.', s=10)
axes[i].set_title('{}:{}'.format(key, val))
# linex = np.array([x.min().item(), x.max().item()])
# liney = tmul * linex + toff
# axes[i].plot(linex, liney, c='r')
# unmark the following line to disable plotting y=x line:
# bevis_tm = ts.values * 0.72 + 70.0
# axes[i].plot(ts.values, bevis_tm, c='k')
min_, max_ = axes[i].get_ylim()
[axes[i].plot(X, newy, c=colors[j]) for j, newy in
enumerate(predict)]
[axes[i].text(0.01, pos[j],
'{} a: {:.2f}, b: {:.2f}'.format(model_names[j],
coefs[j],
inters[j]),
transform=axes[i].transAxes, color=colors[j],
fontsize=12) for j in range(len(coefs))]
# axes[i].text(0.015, 0.9, 'a: {:.2f}, b: {:.2f}'.format(
# tmul, toff), transform=axes[i].transAxes,
# color='black', fontsize=12)
axes[i].set_xlabel('Ts [K]')
axes[i].set_ylabel('Tm [K]')
fig.tight_layout()
trained.append(models)
da = xr.DataArray(trained, dims=[key, 'name'])
da['name'] = model_names
da[key] = vals
elif size == 2:
# other_keys = [*set(['any_cld', 'hour', 'season']).difference(keys)]
# other_keys = [*set(['hour', 'season']).difference(keys)]
vals = [pos_cats_dict[key] for key in chosen_cats]
keys = chosen_cats
# result = np.empty((len(vals[0]), len(vals[1]), 2))
# residuals = []
# rmses = []
trained = []
if plot:
fig, axes = plt.subplots(len(vals[0]), len(vals[1]), sharey=True,
sharex=True, figsize=(15, 8))
fig.suptitle(
                    'Bet Dagan WV-weighted mean atmospheric temperature (Tm) vs. surface temperature (Ts) using {} and {} selection criteria'.format(keys[0].split('.')[-1], keys[1].split('.')[-1]), fontweight='bold', x=0.5, y=1.0)
for i, val0 in enumerate(vals[0]):
trained0 = []
for j, val1 in enumerate(vals[1]):
ts = ds.ts.where(ds[keys[0]] == val0).dropna(
time_dim).where(ds[keys[1]] == val1).dropna(time_dim)
tm = ds.tm.where(ds[keys[0]] == val0).dropna(
time_dim).where(ds[keys[1]] == val1).dropna(time_dim)
# other_val = ds[other_keys[0]].where(ds[keys[0]] == val0).dropna(
# 'time').where(ds[keys[1]] == val1).dropna('time')
X = ts.values.reshape(-1, 1)
y = tm.values
models = [ml.pick_model(x) for x in model_names]
[model.fit(X, y) for model in models]
predict = [model.predict(X) for model in models]
coefs = [model.coef_[0] for model in models]
inters = [model.intercept_ for model in models]
# [tmul, toff] = np.polyfit(x.values, y.values, 1)
# result[i, j, 0] = tmul
# result[i, j, 1] = toff
# new_tm = tmul * x.values + toff
# resid = new_tm - y.values
# rmses.append(np.sqrt(mean_squared_error(y.values, new_tm)))
# residuals.append(resid)
if plot:
axes[i, j].text(0.15, 0.85, 'n={}'.format(ts.size),
verticalalignment='top',
horizontalalignment='center',
transform=axes[i, j].transAxes,
color='blue', fontsize=12)
df = ts.to_dataframe()
df['tm'] = tm
# df[other_keys[0]] = other_val
# g = sns.scatterplot(data=df, x='ts', y='tm', marker='.',
# s=100, ax=axes[i, j],
# hue=other_keys[0])
if plot:
g = sns.scatterplot(data=df, x='ts', y='tm', marker='.',
s=100, ax=axes[i, j], linewidth=0,
alpha=0.5)
g.legend(loc='upper right')
# axes[i, j].scatter(x=x.values, y=y.values, marker='.', s=10)
# axes[i, j].set_title('{}:{}'.format(key, val))
[axes[i, j].plot(X, newy, c=colors[k]) for k, newy in
enumerate(predict)]
# linex = np.array([x.min().item(), x.max().item()])
# liney = tmul * linex + toff
# axes[i, j].plot(linex, liney, c='r')
# axes[i, j].plot(ts.values, ts.values, c='k', alpha=0.2)
min_, max_ = axes[i, j].get_ylim()
[axes[i, j].text(0.01, pos[k],
'{} a: {:.2f}, b: {:.2f}'.format(model_names[k],
coefs[k],
inters[k]),
transform=axes[i, j].transAxes, color=colors[k],
fontsize=12) for k in range(len(coefs))]
# axes[i, j].text(0.015, 0.9, 'a: {:.2f}, b: {:.2f}'.format(
# tmul, toff), transform=axes[i, j].transAxes,
# color='black', fontsize=12)
axes[i, j].set_xlabel('Ts [K]')
axes[i, j].set_ylabel('Tm [K]')
axes[i, j].set_title('{}:{}, {}:{}'.format(keys[0], val0,
keys[1], val1))
fig.tight_layout()
trained0.append(models)
trained.append(trained0)
da = xr.DataArray(trained, dims=keys + ['name'])
da['name'] = model_names
da[keys[0]] = vals[0]
da[keys[1]] = vals[1]
else:
raise ValueError('size of categories must be <=2')
X = ds.ts.values
y = ds.tm.values
std_err = standard_error_slope(X, y)
da.attrs['LR_whole_stderr_slope'] = std_err
return da
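# Example (sketch): use one of the trained estimators returned above to predict
# Tm from surface temperature. `trained_da` is the DataArray of fitted sklearn
# models (the no-category case, with a single 'name' dimension); `ts_values`
# is a 1-D array of Ts samples in Kelvin.
def example_predict_tm(trained_da, ts_values):
    lr = trained_da.sel(name='LR').item()
    X = np.asarray(ts_values).reshape(-1, 1)
    return lr.predict(X)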
station_continous_times = {
    # a station -> continuous data times dict:
    'alon': [None, None],
    'bshm': ['2010', '2017'], # strong dis behaviour
    'csar': [None, '2017'], # small glitch at the end
    'drag': ['2004', None], # small glitch at the beginning
    'dsea': [None, '2016'],
    'elat': ['2013', None], # glitches at the beginning, then up and then down
    'elro': ['2005', '2009'], # up and down, chose the up period
    'gilb': ['2005', None],
    'hrmn': [None, None], # spikes (WetZ), positive spikes in alt due to snow
    'jslm': ['2006', None],
    'kabr': ['2013', None], # strong dis behaviour
    'katz': ['2011', '2016'], # dis behaviour
    'klhv': [None, None],
    'lhav': ['2004', '2006'], # dis behaviour
    'mrav': [None, None],
    'nizn': ['2015-09', None], # something at the beginning
    'nrif': ['2012', None],
    'nzrt': [None, None],
    'ramo': ['2006', None], # something at the beginning
    'slom': ['2015-07', '2017-07'], # strong dis behaviour, ups and downs
    'spir': ['2015', '2018'], # big glitch at the end
    'tela': ['2005', None], # gap in 2003-2004, glitch in 2004
    'yosh': [None, None],
    'yrcm': ['2011', None] # small glitch at the beginning
    }
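# Example (sketch): use the continuous-period table above to clip a station's
# time series before estimating long-term trends. `da` is assumed to be an
# xarray DataArray with a 'time' dimension.
def clip_to_continuous_period(da, station):
    start, end = station_continous_times[station.lower()]
    return da.sel(time=slice(start, end))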
def israeli_gnss_stations_long_term_trend_analysis(
gis_path=gis_path,
rel_plot='tela', show_names=True,
times_dict=station_continous_times):
import pandas as pd
from pathlib import Path
import geopandas as gpd
import matplotlib.pyplot as plt
from aux_gps import geo_annotate
import contextily as ctx
cwd = Path().cwd()
filename = 'israeli_long_term_tectonics_trends.txt'
if (cwd / filename).is_file():
df = pd.read_csv(cwd / filename, delim_whitespace=True,
index_col='station')
else:
isr_stations = pd.read_csv(cwd / 'stations_approx_loc.txt',
delim_whitespace=True)
isr_stations = isr_stations.index.tolist()
df_list = []
for station in isr_stations:
            print('processing station: {}'.format(station))
try:
rds = get_long_trends_from_gnss_station(
station, 'LR', plot=False, times=times_dict[station])
except KeyError:
print(
                    'did not find {} key in times_dict, skipping...'.format(station))
continue
except FileNotFoundError:
print(
                    'did not find {} in gipsyx solutions, skipping...'.format(station))
continue
df_list.append(rds.attrs)
df = pd.DataFrame(df_list)
df.set_index(df.station, inplace=True)
df.drop('station', axis=1, inplace=True)
rest = df.columns[3:].tolist()
df.columns = [
'north_cm_per_year',
'east_cm_per_year',
'up_mm_per_year'] + rest
df['cm_per_year'] = np.sqrt(
df['north_cm_per_year'] ** 2.0 +
df['east_cm_per_year'] ** 2.0)
# define angle from east : i.e., x axis is east
df['angle_from_east'] = np.rad2deg(
np.arctan2(df['north_cm_per_year'], df['east_cm_per_year']))
for station in df.index:
df['rel_mm_north_{}'.format(station)] = (
df['north_cm_per_year'] - df.loc[station, 'north_cm_per_year']) * 100.0
df['rel_mm_east_{}'.format(station)] = (
df['east_cm_per_year'] - df.loc[station, 'east_cm_per_year']) * 100.0
df['rel_mm_per_year_{}'.format(station)] = np.sqrt(
df['rel_mm_north_{}'.format(station)] ** 2.0 +
df['rel_mm_east_{}'.format(station)] ** 2.0)
# define angle from east : i.e., x axis is east
df['rel_angle_from_east_{}'.format(station)] = np.rad2deg(np.arctan2(
df['rel_mm_north_{}'.format(station)], df['rel_mm_east_{}'.format(station)]))
df['rel_up_mm_per_year_{}'.format(
station)] = df['up_mm_per_year'] - df.loc[station, 'up_mm_per_year']
df.to_csv(cwd / filename, sep=' ')
print('{} was saved to {}'.format(filename, cwd))
isr_with_yosh = gpd.read_file(gis_path / 'Israel_demog_yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
stations = gpd.GeoDataFrame(df,
geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=isr_with_yosh.crs)
isr = gpd.sjoin(stations, isr_with_yosh, op='within')
isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
isr = isr.to_crs(epsg=3857)
isr['X'] = isr.geometry.x
isr['Y'] = isr.geometry.y
isr['U'] = isr.east_cm_per_year
isr['V'] = isr.north_cm_per_year
if rel_plot is not None:
isr['U'] = isr['rel_mm_east_{}'.format(rel_plot)]
isr['V'] = isr['rel_mm_north_{}'.format(rel_plot)]
title = 'Relative to {} station'.format(rel_plot)
vertical_label = isr['rel_up_mm_per_year_{}'.format(rel_plot)]
horizontal_label = isr['rel_mm_per_year_{}'.format(rel_plot)]
else:
title = ''
vertical_label = isr['up_mm_per_year']
horizontal_label = isr['cm_per_year']
# isr.drop('dsea', axis=0, inplace=True)
#fig, ax = plt.subplots(figsize=(20, 10))
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
ctx.add_basemap(ax, url=ctx.sources.ST_TERRAIN)
ax.set_axis_off()
if show_names:
ax.plot(
[],
[],
' ',
label=r'$^{\frac{mm}{year}}\bigcirc^{name}_{\frac{mm}{year}}$')
else:
ax.plot(
[],
[],
' ',
label=r'$^{\frac{mm}{year}}\bigcirc_{\frac{mm}{year}}$')
ax.plot([], [], ' ', label='station:')
isr[(isr['years'] <= 5.0) & (isr['years'] >= 0.0)].plot(
ax=ax, markersize=50, color='m', edgecolor='k', marker='o', label='0-5 yrs')
isr[(isr['years'] <= 10.0) & (isr['years'] > 5.0)].plot(
ax=ax, markersize=50, color='y', edgecolor='k', marker='o', label='5-10 yrs')
isr[(isr['years'] <= 15.0) & (isr['years'] > 10.0)].plot(
ax=ax, markersize=50, color='g', edgecolor='k', marker='o', label='10-15 yrs')
isr[(isr['years'] <= 20.0) & (isr['years'] > 15.0)].plot(
ax=ax, markersize=50, color='c', edgecolor='k', marker='o', label='15-20 yrs')
isr[(isr['years'] <= 25.0) & (isr['years'] > 20.0)].plot(
ax=ax, markersize=50, color='r', edgecolor='k', marker='o', label='20-25 yrs')
handles, labels = plt.gca().get_legend_handles_labels()
order = [2, 3, 4, 5, 1, 0]
plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order],
prop={'size': 10}, bbox_to_anchor=(-0.15, 1.0),
title='number of data years')
plt.setp(plt.gca().get_legend().get_texts(), fontsize='x-large')
# isr.plot(ax=ax, column='cm_per_year', cmap='Greens',
# edgecolor='black', legend=True)
cmap = plt.get_cmap('spring', 10)
# Q = ax.quiver(isr['X'], isr['Y'], isr['U'], isr['V'],
# isr['cm_per_year'], cmap=cmap)
Q = ax.quiver(isr['X'], isr['Y'], isr['U'], isr['V'], cmap=cmap)
# fig.colorbar(Q, extend='max')
# qk = ax.quiverkey(Q, 0.8, 0.9, 1, r'$1 \frac{cm}{yr}$', labelpos='E',
# coordinates='figure')
if show_names:
annot1 = geo_annotate(ax, isr.geometry.x, isr.geometry.y, isr.index,
xytext=(3, 3))
annot2 = geo_annotate(ax, isr.geometry.x, isr.geometry.y,
vertical_label, xytext=(3, -10),
fmt='{:.1f}', fw='bold', colorupdown=True)
annot3 = geo_annotate(ax, isr.geometry.x, isr.geometry.y,
horizontal_label, xytext=(-20, 3),
fmt='{:.1f}', c='k', fw='normal')
# plt.legend(handles=[annot1, annot2, annot3])
plt.title(title)
plt.tight_layout()
# elif rel_plot is not None:
# # isr.drop('dsea', axis=0, inplace=True)
# fig, ax = plt.subplots(figsize=(10, 8))
# isr_with_yosh.plot(ax=ax)
# isr[(isr['years'] <= 5.0) & (isr['years'] >= 0.0)].plot(ax=ax, markersize=50, color='m', edgecolor='k', marker='o', label='0-5 yrs')
# isr[(isr['years'] <= 10.0) & (isr['years'] > 5.0)].plot(ax=ax, markersize=50, color='y', edgecolor='k', marker='o', label='5-10 yrs')
# isr[(isr['years'] <= 15.0) & (isr['years'] > 10.0)].plot(ax=ax, markersize=50, color='g', edgecolor='k', marker='o', label='10-15 yrs')
# isr[(isr['years'] <= 20.0) & (isr['years'] > 15.0)].plot(ax=ax, markersize=50, color='c', edgecolor='k', marker='o', label='15-20 yrs')
# isr[(isr['years'] <= 25.0) & (isr['years'] > 20.0)].plot(ax=ax, markersize=50, color='r', edgecolor='k', marker='o', label='20-25 yrs')
# plt.legend(prop={'size': 12}, bbox_to_anchor=(-0.15, 1.0), title='number of data years')
# # isr.plot(ax=ax, column='cm_per_year', cmap='Greens',
# # edgecolor='black', legend=True)
# isr['U'] = isr['rel_mm_east_{}'.format(rel_plot)]
# isr['V'] = isr['rel_mm_north_{}'.format(rel_plot)]
# cmap = plt.get_cmap('spring', 7)
## Q = ax.quiver(isr['X'], isr['Y'], isr['U'], isr['V'],
## isr['rel_mm_per_year_{}'.format(rel_plot)],
## cmap=cmap)
# Q = ax.quiver(isr['X'], isr['Y'], isr['U'], isr['V'], cmap=cmap)
## qk = ax.quiverkey(Q, 0.8, 0.9, 1, r'$1 \frac{mm}{yr}$', labelpos='E',
## coordinates='figure')
## fig.colorbar(Q, extend='max')
# plt.title('Relative to {} station'.format(rel_plot))
# geo_annotate(ax, isr.lon, isr.lat, isr.index, xytext=(3, 3))
# geo_annotate(ax, isr.lon, isr.lat, isr['rel_up_mm_per_year_{}'.format(rel_plot)],
# xytext=(3, -6), fmt='{:.2f}', fw='bold',
# colorupdown=True)
# geo_annotate(ax, isr.lon, isr.lat,
# isr['rel_mm_per_year_{}'.format(rel_plot)],
# xytext=(-21, 3), fmt='{:.2f}', c='k', fw='normal')
## for x, y, label in zip(isr.lon, isr.lat,
## isr.index):
## ax.annotate(label, xy=(x, y), xytext=(3, 3),
## textcoords="offset points")
# # print(isr[['rel_mm_east_{}'.format(rel_plot),'rel_mm_north_{}'.format(rel_plot)]])
return df
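# Example (sketch): build (or load the cached copy of) the long-term trend table
# relative to TELA and list the fastest-moving stations; the column names follow
# the DataFrame constructed above.
def example_fastest_moving_stations(n=5):
    df = israeli_gnss_stations_long_term_trend_analysis(rel_plot='tela',
                                                        show_names=False)
    return df['cm_per_year'].sort_values(ascending=False).head(n)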
#def save_resampled_versions_gispyx_results(station='tela', sample_rate='H'):
# from aux_gps import path_glob
# import xarray as xr
# """resample gipsyx results nc files and save them.options for
# sample_rate are in sample dict"""
# path = GNSS / station / 'gipsyx_solutions'
# glob = '{}_PPP*.nc'.format(station.upper())
# try:
# file = path_glob(path, glob_str=glob)[0]
# except FileNotFoundError:
# print('did not find {} in gipsyx_solutions dir, skipping...'.format(station))
# return
# filename = file.as_posix().split('/')[-1].split('.')[0]
# years = filename.split('_')[-1]
# ds = xr.open_dataset(file)
# time_dim = list(set(ds.dims))[0]
# sample = {'H': 'hourly', 'W': 'weekly', 'MS': 'monthly'}
# print('resampaling {} to {}'.format(station, sample[sample_rate]))
# dsr = ds.resample({time_dim: sample_rate}, keep_attrs=True).mean(keep_attrs=True)
# new_filename = '_'.join([station.upper(), sample[sample_rate], 'PPP',
# years])
# new_filename = new_filename + '.nc'
# print('saving resmapled station {} to {}'.format(station, path))
# comp = dict(zlib=True, complevel=9) # best compression
# encoding = {var: comp for var in dsr.data_vars}
# dsr.to_netcdf(path / new_filename, 'w', encoding=encoding)
# print('Done!')
# return dsr
#def run_MLR_diurnal_harmonics_GNSS(path=work_yuval, season=None, site='tela',
# n_max=4, plot=True, ax=None):
# from sklearn.linear_model import LinearRegression
# from sklearn.metrics import explained_variance_score
# import xarray as xr
# import matplotlib.pyplot as plt
# harmonic = xr.load_dataset(path / 'GNSS_PW_harmonics_diurnal.nc')['{}_mean'.format(site)]
# if season is not None:
# harmonic = harmonic.sel(season=season)
# else:
# harmonic = harmonic.sel(season='ALL')
# pw = xr.open_dataset(path / 'GNSS_PW_anom_50_removed_daily.nc')[site]
# pw.load()
# if season is not None:
# pw = pw.sel(time=pw['time.season'] == season)
# pw = pw.groupby('time.hour').mean()
# # pre-proccess:
# harmonic = harmonic.transpose('hour', 'cpd')
# harmonic = harmonic.sel(cpd=slice(1, n_max))
# X = harmonic.values
# y = pw.values.reshape(-1, 1)
# exp_list = []
# for cpd in harmonic['cpd'].values:
# X = harmonic.sel(cpd=cpd).values.reshape(-1, 1)
# lr = LinearRegression(fit_intercept=False)
# lr.fit(X, y)
# y_pred = lr.predict(X)
# ex_var = explained_variance_score(y, y_pred)
# exp_list.append(ex_var)
# explained = np.array(exp_list) * 100.0
# exp_dict = dict(zip([x for x in harmonic['cpd'].values], explained))
# exp_dict['total'] = np.cumsum(explained)
# exp_dict['season'] = season
# exp_dict['site'] = site
# if plot:
# if ax is None:
# fig, ax = plt.subplots(figsize=(8, 6))
# markers = ['s', 'x', '^', '>', '<', 'X']
# for i, cpd in enumerate(harmonic['cpd'].values):
# harmonic.sel(cpd=cpd).plot(ax=ax, marker=markers[i])
# harmonic.sum('cpd').plot(ax=ax, marker='.')
# pw.plot(ax=ax, marker='o')
# S = ['S{}'.format(x) for x in harmonic['cpd'].values]
# S_total = ['+'.join(S)]
# S = ['S{} ({:.0f}%)'.format(x, exp_dict[int(x)]) for x in harmonic['cpd'].values]
# ax.legend(S+S_total+['PW'])
# ax.grid()
# ax.set_xlabel('Time of day [UTC]')
# ax.set_ylabel('PW anomalies [mm]')
# if season is None:
# ax.set_title('Annual PW diurnal cycle for {} site'.format(site.upper()))
# else:
# ax.set_title('PW diurnal cycle for {} site in {}'.format(site.upper(), season))
# return exp_dict
def calculate_diurnal_variability(path=work_yuval, with_amp=False):
import xarray as xr
import pandas as pd
import numpy as np
    # honor the `path` argument (defaults to work_yuval) instead of the
    # module-level constant:
    pw_anoms = xr.load_dataset(
        path /
        'GNSS_PW_thresh_50_for_diurnal_analysis_removed_daily.nc')
    pw = xr.load_dataset(
        path /
        'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw_anoms = pw_anoms[[x for x in pw_anoms if '_error' not in x]]
pw = pw[[x for x in pw if '_error' not in x]]
amp = np.abs(pw_anoms.groupby('time.hour').mean()).max()
pd.options.display.float_format = '{:.1f}'.format
df = 100.0 * (amp / pw.mean()
).to_array('station').to_dataframe('amplitude_to_mean_ratio')
if with_amp:
df['amplitude'] = amp.to_array('station').to_dataframe('amplitude')
seasons = ['JJA', 'SON', 'DJF', 'MAM']
for season in seasons:
season_mean = pw.sel(time=pw['time.season'] == season).mean()
season_anoms = pw_anoms.sel(time=pw_anoms['time.season'] == season)
diff_season = np.abs(season_anoms.groupby('time.hour').mean()).max()
        # keep the column name consistent with the to_dataframe() name below:
        df['amplitude_to_mean_ratio_{}'.format(season)] = 100.0 * (diff_season / season_mean).to_array(
            'station').to_dataframe('amplitude_to_mean_ratio_{}'.format(season))
if with_amp:
df['amplitude_{}'.format(season)] = diff_season.to_array(
'station').to_dataframe('amplitude_{}'.format(season))
return df
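# Example (sketch): tabulate the diurnal amplitude-to-mean ratios (in percent)
# for all stations and rank them by the annual value.
def example_diurnal_variability_ranking():
    df = calculate_diurnal_variability(with_amp=False)
    return df['amplitude_to_mean_ratio'].sort_values(ascending=False)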
def perform_diurnal_harmonic_analysis_all_GNSS(path=work_yuval, n=6,
savepath=work_yuval, dss=None,
filename=None):
import xarray as xr
from aux_gps import harmonic_analysis_xr
from aux_gps import save_ncfile
if dss is None:
pw = xr.load_dataset(path / 'GNSS_PW_anom_50_for_diurnal_analysis_removed_daily.nc')
else:
pw = dss
dss_list = []
for site in pw:
print('performing harmonic analysis for GNSS {} site:'.format(site))
dss = harmonic_analysis_xr(pw[site], n=n, anomalize=False, normalize=False,
user_field_name=None)
dss_list.append(dss)
dss_all = xr.merge(dss_list)
dss_all.attrs['field'] = 'PW'
dss_all.attrs['units'] = 'mm'
if savepath is not None:
if filename is None:
filename = 'GNSS_PW_harmonics_diurnal.nc'
save_ncfile(dss_all, savepath, filename)
return dss_all
def extract_diurnal_freq_GNSS(path=work_yuval, eps=0.001, n=6):
"""extract the magnitude of the first n diurnal harmonics form the
GNSS power spectra"""
import xarray as xr
def extract_freq(power, eps=0.001, cen_freq=1):
freq_band = [cen_freq - eps, cen_freq + eps]
mag = power.sel(freq=slice(*freq_band)).mean('freq')
return mag
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
diurnal_list = []
for station in power:
print('extracting {} freqs from station {}.'.format(n, station))
magnitudes = [extract_freq(power[station], eps=eps, cen_freq=(x+1)) for x in range(n)]
da = xr.DataArray(magnitudes, dims=['freq'])
da['freq'] = [x+1 for x in range(n)]
da.name = station
diurnal_list.append(da)
mag = xr.merge(diurnal_list)
return mag
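# Example (sketch): extract the magnitudes of the first four diurnal harmonics
# from the saved power spectra and print them for TELA (station variables are
# assumed to be lower-case, as elsewhere in this module).
def example_diurnal_harmonic_magnitudes():
    mag = extract_diurnal_freq_GNSS(n=4)
    print(mag['tela'].to_dataframe())
    return mag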
def produce_GNSS_fft_diurnal(path=work_yuval, savepath=work_yuval, plot=False):
"""do FFT on the daily anomalies of the GNSS PW in order to find the
diurnal and sub-diurnal harmonics, and save them"""
from aux_gps import fft_xr
import xarray as xr
pw = xr.load_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[[x for x in pw if '_error' not in x]]
fft_list = []
for station in pw:
da = fft_xr(pw[station], nan_fill='zero', user_freq=None, units='cpd',
plot=False)
fft_list.append(da)
power = xr.merge(fft_list)
if plot:
power.to_array('station').mean('station').plot(xscale='log')
if savepath is not None:
filename = 'GNSS_PW_power_spectrum_diurnal.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in power.data_vars}
power.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return power
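# Example (sketch): compute the PW power spectra without saving them, then plot
# the station-mean spectrum on a log frequency axis (the saved file normally
# feeds extract_diurnal_freq_GNSS above).
def example_build_power_spectra():
    power = produce_GNSS_fft_diurnal(savepath=None, plot=False)
    power.to_array('station').mean('station').plot(xscale='log')
    return power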
def classify_tide_events(gnss_path=work_yuval, hydro_path=hydro_path,
station='tela', window='1D', sample='hourly',
hydro_station=48130):
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
kwargs = locals()
[kwargs.pop(key) for key in ['LogisticRegression', 'confusion_matrix', 'train_test_split',
'classification_report']]
lr = LogisticRegression(n_jobs=-1)
X, y = GNSS_pw_to_X_using_window(**kwargs)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
confusion_matrix = confusion_matrix(y_test, y_pred)
print(confusion_matrix)
print(classification_report(y_test, y_pred))
return lr
def GNSS_pw_to_X_using_window(gnss_path=work_yuval, hydro_path=hydro_path,
station='tela', window='1D', sample='hourly',
hydro_station=60190):
"""assemble n window length gnss_pw data array with datetimes and
a boolean array of positive or negative tide events"""
import xarray as xr
from aux_gps import time_series_stack_with_window
# read PW and select station:
GNSS_pw = xr.open_dataset(gnss_path / 'GNSS_{}_PW.nc'.format(sample))
pw = GNSS_pw[station]
    # create window-length (default '1D') data chunks from pw, honoring the
    # `window` argument:
    ds_X = time_series_stack_with_window(pw, window=window)
# dropna:
ds_X = ds_X.dropna('start_date')
X = ds_X[station]
# read tides and select station:
tides = xr.open_dataset(hydro_path / 'hydro_tides.nc')
# select station:
tide = tides['TS_{}_max_flow'.format(hydro_station)]
# dropna:
tide = tide.dropna('tide_start')
# resample to ds_X time:
tide = tide.resample(tide_start=ds_X.attrs['freq']).mean()
tide = tide.dropna('tide_start')
# now build y:
y = np.empty(X.values.shape[0], dtype=bool)
start_date = X.start_date.values
points = X.points.size
tide_start = tide.tide_start.values
for i in range(len(start_date) - points):
st = start_date[i + points]
if st in tide_start:
y[i] = True
else:
y[i] = False
y = xr.DataArray(y, dims=['start_date'])
y['start_date'] = start_date
y.name = 'tide_events'
return X, y
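# Example (sketch): assemble the windowed PW matrix and tide labels for TELA and
# report the class balance before fitting the logistic regression in
# classify_tide_events above.
def example_tide_class_balance():
    X, y = GNSS_pw_to_X_using_window(station='tela', hydro_station=60190)
    print('windows: {}, tide events: {}'.format(y.size, int(y.values.sum())))
    return X, y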
def produce_all_GNSS_PW_anomalies(load_path=work_yuval, thresh=50,
grp1='hour', grp2='dayofyear',
remove_daily_only=False,
savepath=work_yuval, extra_name=None):
import xarray as xr
from aux_gps import anomalize_xr
if extra_name is not None:
GNSS_pw = xr.open_dataset(load_path / 'GNSS_PW_thresh_{:.0f}_{}.nc'.format(thresh, extra_name))
else:
GNSS_pw = xr.open_dataset(load_path / 'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
anom_list = []
stations_only = [x for x in GNSS_pw.data_vars if '_error' not in x]
for station in stations_only:
pw = GNSS_pw[station]
if remove_daily_only:
print('{}'.format(station))
pw_anom = anomalize_xr(pw, 'D')
else:
pw_anom = produce_PW_anomalies(pw, grp1, grp2, False)
anom_list.append(pw_anom)
GNSS_pw_anom = xr.merge(anom_list)
if savepath is not None:
if remove_daily_only:
if extra_name is not None:
filename = 'GNSS_PW_thresh_{:.0f}_{}_removed_daily.nc'.format(thresh, extra_name)
else:
filename = 'GNSS_PW_thresh_{:.0f}_removed_daily.nc'.format(thresh)
GNSS_pw_anom.attrs['action'] = 'removed daily means'
else:
filename = 'GNSS_PW_anom_{:.0f}_{}_{}.nc'.format(thresh, grp1, grp2)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in GNSS_pw_anom.data_vars}
GNSS_pw_anom.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return GNSS_pw_anom
def process_alt_GNSS_stations_and_save(path=work_yuval, minor_filtering=True):
import xarray as xr
from aux_gps import keep_iqr
from aux_gps import save_ncfile
alt = load_gipsyx_results(field_all='alt')
alts = [keep_iqr(alt[x], k=2) for x in alt]
alt = xr.merge(alts)
filename = 'GNSS_alt.nc'
save_ncfile(alt, path, filename)
filename = 'GNSS_alt_hourly.nc'
alt_h = alt.resample(time='H', keep_attrs=True).mean(keep_attrs=True)
save_ncfile(alt_h, path, filename)
filename = 'GNSS_alt_daily.nc'
alt_d = alt.resample(time='D', keep_attrs=True).mean(keep_attrs=True)
save_ncfile(alt_d, path, filename)
filename = 'GNSS_alt_monthly.nc'
alt_m = alt.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
save_ncfile(alt_m, path, filename)
return
def perform_annual_harmonic_analysis_all_GNSS(path=work_yuval, field='PW',
era5=False, n=6, keep_full_years=True):
from aux_gps import harmonic_da_ts
from aux_gps import save_ncfile
from aux_gps import keep_full_years_of_monthly_mean_data
import xarray as xr
from aux_gps import anomalize_xr
if era5:
pw = xr.load_dataset(path / 'GNSS_era5_monthly_{}.nc'.format(field))
else:
pw = xr.load_dataset(path / 'GNSS_{}_monthly_thresh_50.nc'.format(field))
if keep_full_years:
print('kept full years only')
pw = pw.map(keep_full_years_of_monthly_mean_data, verbose=False)
pw = anomalize_xr(pw, freq='AS')
dss_list = []
for site in pw:
print('performing annual harmonic analysis for GNSS {} site:'.format(site))
# remove site mean:
pwv = pw[site] - pw[site].mean('time')
dss = harmonic_da_ts(pwv, n=n, grp='month')
dss_list.append(dss)
dss_all = xr.merge(dss_list)
dss_all.attrs['field'] = 'PWV'
dss_all.attrs['units'] = 'mm'
if era5:
filename = 'GNSS_{}_ERA5_harmonics_annual.nc'.format(field)
else:
filename = 'GNSS_{}_harmonics_annual.nc'.format(field)
save_ncfile(dss_all, path, filename)
return dss_all
def produce_PWV_anomalies_from_stacked_groups(pw_da, grp1='hour', grp2='dayofyear',
standartize=False, plot=True):
"""
use time_series_stack (return the whole ds including the time data)
to produce the anomalies per station. use standertize=True to divide the
anoms with std
Parameters
----------
pw_da : TYPE
DESCRIPTION.
grp1 : TYPE, optional
DESCRIPTION. The default is 'hour'.
grp2 : TYPE, optional
DESCRIPTION. The default is 'dayofyear'.
plot : TYPE, optional
DESCRIPTION. The default is True.
Returns
-------
pw_anom : TYPE
DESCRIPTION.
"""
from aux_gps import time_series_stack
import xarray as xr
from aux_gps import get_unique_index
from aux_gps import xr_reindex_with_date_range
from scipy import stats
import matplotlib.pyplot as plt
time_dim = list(set(pw_da.dims))[0]
fname = pw_da.name
print('computing anomalies for {}'.format(fname))
stacked_pw = time_series_stack(pw_da, time_dim=time_dim, grp1=grp1,
grp2=grp2, return_just_stacked_da=False)
pw_anom = stacked_pw.copy(deep=True)
attrs = pw_anom.attrs
rest_dim = [x for x in stacked_pw.dims if x != grp1 and x != grp2][0]
# compute mean on rest dim and remove it from stacked_da:
rest_mean = stacked_pw[fname].mean(rest_dim)
rest_std = stacked_pw[fname].std(rest_dim)
for rest in stacked_pw[rest_dim].values:
pw_anom[fname].loc[{rest_dim: rest}] -= rest_mean
if standartize:
pw_anom[fname].loc[{rest_dim: rest}] /= rest_std
# now, flatten anomalies to restore the time-series structure:
vals = pw_anom[fname].values.ravel()
times = pw_anom[time_dim].values.ravel()
pw_anom = xr.DataArray(vals, dims=[time_dim])
pw_anom.attrs = attrs
pw_anom[time_dim] = times
pw_anom = get_unique_index(pw_anom)
pw_anom = pw_anom.sortby(time_dim)
pw_anom = xr_reindex_with_date_range(pw_anom, freq=pw_anom.attrs['freq'])
pw_anom.name = fname
pw_anom.attrs['description'] = 'anomalies are computed from {} and {} groupings'.format(grp1, grp2)
if standartize:
pw_anom.attrs['action'] = 'data was also standartized'
if plot:
fig, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [4, 1]})
pw = pw_anom.dropna(time_dim).values
pw_anom.plot(ax=ax1)
pw_anom.plot.hist(bins=100, density=True, ax=ax2)
xt = ax2.get_xticks()
xmin, xmax = min(xt), max(xt)
lnspc = np.linspace(xmin, xmax, len(pw_anom.values))
# lets try the normal distribution first
m, s = stats.norm.fit(pw) # get mean and standard deviation
# now get theoretical values in our interval
pdf_g = stats.norm.pdf(lnspc, m, s)
ax2.plot(lnspc, pdf_g, label="Norm") # plot it
# exactly same as above
ag, bg, cg = stats.gamma.fit(pw)
pdf_gamma = stats.gamma.pdf(lnspc, ag, bg, cg)
ax2.plot(lnspc, pdf_gamma, label="Gamma")
# guess what :)
ab, bb, cb, db = stats.beta.fit(pw)
pdf_beta = stats.beta.pdf(lnspc, ab, bb, cb, db)
ax2.plot(lnspc, pdf_beta, label="Beta")
return pw_anom
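# Example (sketch): compute hour-by-dayofyear anomalies for a single station's
# PW series and resample them to daily means; the time dimension is assumed to
# be named 'time'.
def example_station_anomalies(pw_da):
    anom = produce_PWV_anomalies_from_stacked_groups(pw_da, grp1='hour',
                                                     grp2='dayofyear',
                                                     standartize=False,
                                                     plot=False)
    return anom.resample(time='D').mean()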
def load_GNSS_TD(station='tela', sample_rate=None, plot=True):
"""load and plot temperature for station from IMS, to choose
sample rate different than 5 mins choose: 'H', 'W' or 'MS'"""
from aux_gps import path_glob
from aux_gps import plot_tmseries_xarray
import xarray as xr
sample = {'1H': 'hourly', '3H': '3hourly', 'D': 'daily', 'W': 'weekly',
'MS': 'monthly'}
path = ims_path
if sample_rate is None:
glob = 'GNSS_5mins_TD_ALL*.nc'
try:
file = path_glob(path, glob_str=glob)[0]
except FileNotFoundError as e:
print(e)
return station
else:
glob = 'GNSS_{}_TD_ALL*.nc'.format(sample[sample_rate])
try:
file = path_glob(path, glob_str=glob)[0]
except FileNotFoundError as e:
print(e)
return station
ds = xr.open_dataset(file)
da = ds[station]
if plot:
plot_tmseries_xarray(da)
return da
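# Example (sketch): load hourly surface temperature for TELA without plotting
# and summarize it as a pandas Series.
def example_load_tela_temperature():
    td = load_GNSS_TD(station='tela', sample_rate='1H', plot=False)
    return td.to_series().describe()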
#def align_monthly_means_PW_and_T(path=work_yuval, ims_path=ims_path,
# thresh=50.0):
# """align monthly means PW and T for plots"""
# import xarray as xr
# pw = xr.load_dataset(path / 'GNSS_PW_thresh_{:.0f}.nc'.format(thresh))
# # get attrs dict:
# attrs = {}
# for station in pw.data_vars:
# attrs[station] = pw[station].attrs
# stations = [x for x in pw.data_vars]
# # resample to monthly means:
# pw = pw.resample(time='MS').mean('time')
# # copy attrs to each station:
# for station in pw.data_vars:
# pw[station].attrs = attrs[station]
# T = xr.load_dataset(ims_path / 'GNSS_monthly_TD_ALL_1996_2019.nc')
# T = T[stations]
# # rename T stations to T:
# for sta in T.data_vars.keys():
# T = T.rename({sta: sta + '_T'})
# combined = xr.merge([pw, T])
# filename = 'PW_T_monthly_means_thresh_{:.0f}.nc'.format(thresh)
# combined.to_netcdf(path / filename, 'w')
# print('saved {} to {}'.format(filename, path))
# return combined
def filter_month_year_data_heatmap_plot(da_ts, freq='5T', thresh=50.0,
verbose=True, plot=True):
"""accepts dataarray time series(with freq <1D) and removes the daily data
with less than thresh percent and then removes months with data less than
thresh percent. data is saved to dataarray with some metadata"""
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
name = da_ts.name
try:
freq = da_ts.attrs['freq']
except KeyError:
pass
# data points per day:
if freq == '5T':
points = 24 * 12
elif freq == '1H':
points = 24
elif freq == '3H':
        points = 8
elif freq == '1D' or freq == 'D':
points = 1
if verbose:
print(
'analysing {} station with {} data points per day:'.format(
name, points))
# dropna:
df = da_ts.dropna('time').to_dataframe()
# calculate daily data to drop (if points less than threshold):
df['date'] = df.index.date
points_in_day = df.groupby(['date']).count()[name].to_frame()
# calculate total days with any data:
tot_days = points_in_day[points_in_day >0].dropna().count().values.item()
# calculate daily data percentage (from maximum available):
points_in_day['percent'] = (points_in_day[name] / points) * 100.0
# get the number of days to drop and the dates themselves:
number_of_days_to_drop = points_in_day[name][points_in_day['percent'] <= thresh].count()
percent_of_days_to_drop = 100.0 * \
number_of_days_to_drop / len(points_in_day)
days_to_drop = points_in_day.index[points_in_day['percent'] <= thresh]
if verbose:
print('found {} ({:.2f} %) bad days with {:.0f} % drop thresh.'.format(
number_of_days_to_drop, percent_of_days_to_drop, thresh))
# now drop the days:
for day_to_drop in days_to_drop:
df = df[df['date'] != day_to_drop]
# now calculate the number of months missing days with threshold:
df['month'] = df.index.month
df['year'] = df.index.year
df['max_points'] = df.index.days_in_month * points
cnt = df.groupby(['month', 'year']).count()[name].to_frame()
    pivot = pd.pivot_table(cnt, index='year', columns='month')
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from numpy import (nan)
from pandas import (
Series,
DataFrame,
date_range,
datetime,
Panel,
MultiIndex,
)
from pandas.util.testing import (assert_frame_equal,
assert_series_equal)
from .. utils import (compute_forward_returns,
quantize_factor,
common_start_returns)
class UtilsTestCase(TestCase):
dr = date_range(start='2015-1-1', end='2015-1-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = DataFrame(index=dr,
columns=tickers,
data=[[1, 2, 3, 4],
[4, 3, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = DataFrame()
factor_data['factor'] = factor
factor_data['group'] = Series(index=factor.index,
data=[1, 1, 2, 2, 1, 1, 2, 2],
dtype="category")
def test_compute_forward_returns(self):
dr = date_range(start='2015-1-1', end='2015-1-3')
prices = DataFrame(index=dr, columns=['A', 'B'],
data=[[1, 1], [1, 2], [2, 1]])
fp = compute_forward_returns(prices, periods=[1, 2])
ix = MultiIndex.from_product([dr, ['A', 'B']],
names=['date', 'asset'])
        expected = DataFrame(index=ix, columns=[1, 2])
"""Provide ground truth."""
import logging
import os
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from tqdm import tqdm
logger = logging.getLogger(__name__)
def provide_ground_truth(main_dir, date, xml):
ind = xml.find('T')
time = xml[ind+1:ind+7]
overpass_time = datetime.strptime(date+time, '%Y%m%d%H%M%S')
lower_time = overpass_time - timedelta(minutes=30)
upper_time = overpass_time + timedelta(minutes=30)
if sorted(os.listdir('../data/ground_truth')) == sorted(os.listdir(
main_dir + '/ground_truth/')):
logger.info("CSV files already exist.")
return
for file in tqdm(os.listdir('../data/ground_truth'), desc='Create csv for %s .' % date):
logger.info('Extract data from %s' % file)
        df = pd.read_csv('../data/ground_truth/' + file, sep=',', index_col=0)
from matplotlib.pyplot import title
import requests
import json
import pandas as pd
import mplfinance as mpl
def plot_candlestick_graph(df):
    df.date = pd.to_datetime(df.date)
import os
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian
import pandas as pd
from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io import pytables as pytables
from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term
pytestmark = pytest.mark.single
def test_mode(setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
msg = r"[\S]* does not exist"
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
with HDFStore(path, mode=mode) as store:
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
msg = (
"mode w is not allowed while performing a read. "
r"Allowed modes are r, r\+ and a."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
msg = (
r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the "
"current file!"
)
# invalid mode change
with pytest.raises(PossibleDataLossError, match=msg):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
    store = HDFStore(path, mode="a")
from pandas._testing import assert_series_equal, assert_frame_equal
import pandas as pd
def test_types_assert_series_equal() -> None:
s1 = pd.Series([0, 1, 1, 0])
s2 = pd.Series([0, 1, 1, 0])
assert_series_equal(left=s1, right=s2)
assert_series_equal(s1, s2, check_freq=False, check_categorical=True, check_flags=True,
check_datetimelike_compat=True)
assert_series_equal(s1, s2, check_dtype=True, check_less_precise=True, check_names=True)
def test_types_assert_frame_equal() -> None:
df1 = pd.DataFrame(data={'col1': [1.0, 2.0], 'col2': [3.0, 4.0]})
df2 = pd.DataFrame(data={'col1': [1.0, 2.0], 'col2': [3.0, 4.0]})
    assert_frame_equal(df1, df2)
import pandas as pd
from pathlib import Path
from pandarallel import pandarallel
from functools import partial
from .utils import LookupTable
from .language_model import SRILM
pandarallel.initialize(verbose=0)
class Corpus:
def __init__(self, root):
self.root = Path(root)
def load_data_frame(self, split):
raise NotImplementedError
def create_vocab(self):
df = self.load_data_frame("train")
sentences = df["annotation"].to_list()
return LookupTable(
[gloss for sentence in sentences for gloss in sentence],
allow_unk=True,
)
class PhoenixCorpus(Corpus):
mean = [0.53724027, 0.5272855, 0.51954997]
std = [1, 1, 1]
def __init__(self, root):
super().__init__(root)
def load_alignment(self):
dirname = self.root / "annotations" / "automatic"
# important to literally read NULL instead of reading it as NaN
read = partial(pd.read_csv, sep=" ", na_filter=False)
ali = read(dirname / "train.alignment", header=None, names=["id", "classlabel"])
cls = read(dirname / "trainingClasses.txt")
df = | pd.merge(ali, cls, how="left", on="classlabel") | pandas.merge |
import datetime
import glob
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import csv
from matplotlib.dates import num2date, date2num
from mplfinance.original_flavor import candlestick_ochl
import sqlalchemy
from sqlalchemy import MetaData, Table, Column, Integer, String, Float, DateTime, ForeignKey
import config
import math
def import_data(csv_file, hdf_file, path):
"""Imports historical stock price data.
Import from consolidated .hdf if available, else import from consolidated .csv if available, else import data and
create consolidated files for future imports.
Parameters
----------
csv_file : sequence
The relative path to the consolidated .csv file.
hdf_file : sequence
The relative path to the consolidated .hdf file.
path : sequence
The path to the directory containing the historical data.
Raises
------
"""
if os.path.isfile(path + hdf_file):
df = | pd.read_hdf(path + hdf_file, 'table') | pandas.read_hdf |
"""Tools for generating and forecasting with ensembles of models."""
import datetime
import numpy as np
import pandas as pd
import json
from autots.models.base import PredictionObject
def BestNEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate mean forecast for ensemble of models."""
# id_list = list(ensemble_params['models'].keys())
# does it handle missing models well?
# model_indexes = [x for x in forecasts.keys() if x in id_list]
model_count = len(forecasts.keys())
if model_count < 1:
raise ValueError("BestN failed, no component models available.")
sample_df = next(iter(forecasts.values()))
columnz = sample_df.columns
indices = sample_df.index
ens_df = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in forecasts.items():
ens_df = ens_df + x
ens_df = ens_df / model_count
ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in lower_forecasts.items():
ens_df_lower = ens_df_lower + x
ens_df_lower = ens_df_lower / model_count
ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz)
for idx, x in upper_forecasts.items():
ens_df_upper = ens_df_upper + x
ens_df_upper = ens_df_upper / model_count
ens_runtime = datetime.timedelta(0)
for x in forecasts_runtime.values():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
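# --- Editor's illustrative sketch; not part of the original source. ---
# It shows only the per-timestep averaging used by BestNEnsemble above, with
# invented model ids and a toy index; it does not build a PredictionObject.
def _example_best_n_average():
    idx = pd.date_range("2021-01-01", periods=3, freq="D")
    toy = {
        "model_a": pd.DataFrame({"series1": [1.0, 2.0, 3.0]}, index=idx),
        "model_b": pd.DataFrame({"series1": [3.0, 4.0, 5.0]}, index=idx),
    }
    ens_df = pd.DataFrame(0, index=idx, columns=["series1"])
    for fcst in toy.values():
        ens_df = ens_df + fcst
    return ens_df / len(toy)  # per-timestep mean: 2.0, 3.0, 4.0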
def DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for distance ensemble."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
first_model_index = forecasts_list.index(ensemble_params['FirstModel'])
second_model_index = forecasts_list.index(ensemble_params['SecondModel'])
forecast_length = forecasts[0].shape[0]
dis_frac = ensemble_params['dis_frac']
first_bit = int(np.ceil(forecast_length * dis_frac))
second_bit = int(np.floor(forecast_length * (1 - dis_frac)))
ens_df = (
forecasts[first_model_index]
.head(first_bit)
.append(forecasts[second_model_index].tail(second_bit))
)
ens_df_lower = (
lower_forecasts[first_model_index]
.head(first_bit)
.append(lower_forecasts[second_model_index].tail(second_bit))
)
ens_df_upper = (
upper_forecasts[first_model_index]
.head(first_bit)
.append(upper_forecasts[second_model_index].tail(second_bit))
)
id_list = list(ensemble_params['models'].keys())
model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list]
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in model_indexes:
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result_obj = PredictionObject(
model_name="Ensemble",
forecast_length=len(ens_df.index),
forecast_index=ens_df.index,
forecast_columns=ens_df.columns,
lower_forecast=ens_df_lower,
forecast=ens_df,
upper_forecast=ens_df_upper,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result_obj
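# Editor's note (not part of the original source): a worked example of the
# split above -- with forecast_length = 14 and dis_frac = 0.2, first_bit =
# ceil(14 * 0.2) = 3 rows are taken from FirstModel and second_bit =
# floor(14 * 0.8) = 11 rows from SecondModel, so 3 + 11 = 14 rows total.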
def summarize_series(df):
"""Summarize time series data. For now just df.describe()."""
df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9])
return df_sum
def horizontal_classifier(df_train, known: dict, method: str = "whatever"):
"""
Classify unknown series with the appropriate model for horizontal ensembling.
Args:
df_train (pandas.DataFrame): historical data about the series. Columns = series_ids.
known (dict): dict of series_id: classifier outcome including some but not all series in df_train.
Returns:
dict.
"""
# known = {'EXUSEU': 'xx1', 'MCOILWTICO': 'xx2', 'CSUSHPISA': 'xx3'}
columnz = df_train.columns.tolist()
X = summarize_series(df_train).transpose()
known_l = list(known.keys())
unknown = list(set(columnz) - set(known_l))
Xt = X.loc[known_l]
Xf = X.loc[unknown]
Y = np.array(list(known.values()))
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(Xt, Y)
result = clf.predict(Xf)
result_d = dict(zip(Xf.index.tolist(), result))
# since this only has estimates, overwrite with known that includes more
final = {**result_d, **known}
# temp = pd.DataFrame({'series': list(final.keys()), 'model': list(final.values())})
# temp2 = temp.merge(X, left_on='series', right_index=True)
return final
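# --- Editor's illustrative sketch; not part of the original source. ---
# Toy call of horizontal_classifier above: three series with known model
# assignments are used to label one unknown series from its describe() stats.
# The series names and model labels below are invented for illustration.
def _example_horizontal_classifier():
    np.random.seed(0)
    toy_train = pd.DataFrame(
        np.random.randn(40, 4), columns=["s1", "s2", "s3", "s4"]
    )
    known = {"s1": "ETS", "s2": "ARIMA", "s3": "ETS"}
    return horizontal_classifier(toy_train, known)  # also assigns "s4"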
def generalize_horizontal(
df_train, known_matches: dict, available_models: list, full_models: list = None
):
"""generalize a horizontal model trained on a subset of all series
Args:
df_train (pd.DataFrame): time series data
known_matches (dict): series:model dictionary for some to all series
available_models (list): models actually available
full_models (list): models that are available for every single series
"""
org_idx = df_train.columns
org_list = org_idx.tolist()
# remove any unavailable models or unnecessary series
known_matches = {ser: mod for ser, mod in known_matches.items() if ser in org_list}
k = {ser: mod for ser, mod in known_matches.items() if mod in available_models}
# check if any series are missing from model list
if not k:
raise ValueError("Horizontal template has no models matching this data!")
if len(set(org_list) - set(list(k.keys()))) > 0:
# filter down to only models available for all
# print(f"Models not available: {[ser for ser, mod in known_matches.items() if mod not in available_models]}")
# print(f"Series not available: {[ser for ser in df_train.columns if ser not in list(known_matches.keys())]}")
if full_models is not None:
k2 = {ser: mod for ser, mod in k.items() if mod in full_models}
else:
k2 = k.copy()
all_series_part = horizontal_classifier(df_train, k2)
# since this only has "full", overwrite with known that includes more
all_series = {**all_series_part, **k}
else:
all_series = known_matches
return all_series
def HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Generate forecast for per_series ensembling."""
# this is meant to fill in any failures
available_models = list(forecasts.keys())
train_size = df_train.shape
# print(f"running inner generalization with training size: {train_size}")
full_models = [
mod for mod, fcs in forecasts.items() if fcs.shape[1] == train_size[1]
]
if not full_models:
full_models = available_models # hope it doesn't need to fill
# print(f"FULLMODEL {len(full_models)}: {full_models}")
if prematched_series is None:
prematched_series = ensemble_params['series']
all_series = generalize_horizontal(
df_train, prematched_series, available_models, full_models
)
# print(f"ALLSERIES {len(all_series.keys())}: {all_series}")
org_idx = df_train.columns
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in all_series.items():
try:
c_fore = forecasts[mod_id][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
print(f"Horizontal ensemble unable to add model {repr(e)}")
# upper
c_fore = upper_forecasts[mod_id][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[mod_id][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
# make sure columns align to original
forecast_df = forecast_df.reindex(columns=org_idx)
u_forecast_df = u_forecast_df.reindex(columns=org_idx)
l_forecast_df = l_forecast_df.reindex(columns=org_idx)
# combine runtimes
ens_runtime = datetime.timedelta(0)
for idx, x in forecasts_runtime.items():
ens_runtime = ens_runtime + x
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
):
"""Generate forecast for per_series per distance ensembling."""
# handle that the inputs are now dictionaries
forecasts = list(forecasts.values())
lower_forecasts = list(lower_forecasts.values())
upper_forecasts = list(upper_forecasts.values())
forecasts_runtime = list(forecasts_runtime.values())
id_list = list(ensemble_params['models'].keys())
mod_dic = {x: idx for idx, x in enumerate(forecasts_list) if x in id_list}
forecast_length = forecasts[0].shape[0]
dist_n = int(np.ceil(ensemble_params['dis_frac'] * forecast_length))
dist_last = forecast_length - dist_n
forecast_df, u_forecast_df, l_forecast_df = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series1'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df = pd.concat([forecast_df, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df = pd.concat([u_forecast_df, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df = pd.concat([l_forecast_df, c_fore], axis=1)
forecast_df2, u_forecast_df2, l_forecast_df2 = (
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame(),
)
for series, mod_id in ensemble_params['series2'].items():
l_idx = mod_dic[mod_id]
try:
c_fore = forecasts[l_idx][series]
forecast_df2 = pd.concat([forecast_df2, c_fore], axis=1)
except Exception as e:
repr(e)
print(forecasts[l_idx].columns)
print(forecasts[l_idx].head())
# upper
c_fore = upper_forecasts[l_idx][series]
u_forecast_df2 = pd.concat([u_forecast_df2, c_fore], axis=1)
# lower
c_fore = lower_forecasts[l_idx][series]
l_forecast_df2 = pd.concat([l_forecast_df2, c_fore], axis=1)
forecast_df = pd.concat(
[forecast_df.head(dist_n), forecast_df2.tail(dist_last)], axis=0
)
u_forecast_df = pd.concat(
[u_forecast_df.head(dist_n), u_forecast_df2.tail(dist_last)], axis=0
)
l_forecast_df = pd.concat(
[l_forecast_df.head(dist_n), l_forecast_df2.tail(dist_last)], axis=0
)
ens_runtime = datetime.timedelta(0)
for idx, x in enumerate(forecasts_runtime):
if idx in list(mod_dic.values()):
ens_runtime = ens_runtime + forecasts_runtime[idx]
ens_result = PredictionObject(
model_name="Ensemble",
forecast_length=len(forecast_df.index),
forecast_index=forecast_df.index,
forecast_columns=forecast_df.columns,
lower_forecast=l_forecast_df,
forecast=forecast_df,
upper_forecast=u_forecast_df,
prediction_interval=prediction_interval,
predict_runtime=datetime.timedelta(0),
fit_runtime=ens_runtime,
model_parameters=ensemble_params,
)
return ens_result
def EnsembleForecast(
ensemble_str,
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=None,
prematched_series: dict = None,
):
"""Return PredictionObject for given ensemble method."""
s3list = ['best3', 'best3horizontal', 'bestn']
if ensemble_params['model_name'].lower().strip() in s3list:
ens_forecast = BestNEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
if ensemble_params['model_name'].lower().strip() == 'dist':
ens_forecast = DistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
hlist = ['horizontal', 'probabilistic']
if ensemble_params['model_name'].lower().strip() in hlist:
ens_forecast = HorizontalEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
df_train=df_train,
prematched_series=prematched_series,
)
return ens_forecast
if ensemble_params['model_name'].lower().strip() == 'hdist':
ens_forecast = HDistEnsemble(
ensemble_params,
forecasts_list,
forecasts,
lower_forecasts,
upper_forecasts,
forecasts_runtime,
prediction_interval,
)
return ens_forecast
def EnsembleTemplateGenerator(
initial_results, forecast_length: int = 14, ensemble: str = "simple"
):
"""Generate ensemble templates given a table of results."""
ensemble_templates = pd.DataFrame()
if 'simple' in ensemble:
ens_temp = initial_results.model_results.drop_duplicates(subset='ID')
ens_temp = ens_temp[ens_temp['Ensemble'] == 0]
# best 3, all can be of same model type
best3nonunique = ens_temp.nsmallest(3, columns=['Score'])
n_models = best3nonunique.shape[0]
if n_models == 3:
ensemble_models = {}
for index, row in best3nonunique.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3nu_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'BestN',
'model_count': n_models,
'model_metric': 'best_score',
'models': ensemble_models,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3nu_params = pd.DataFrame(best3nu_params, index=[0])
ensemble_templates = pd.concat([ensemble_templates, best3nu_params], axis=0)
# best 3, by SMAPE, RMSE, SPL
bestsmape = ens_temp.nsmallest(1, columns=['smape_weighted'])
bestrmse = ens_temp.nsmallest(2, columns=['rmse_weighted'])
bestmae = ens_temp.nsmallest(3, columns=['spl_weighted'])
best3metric = pd.concat([bestsmape, bestrmse, bestmae], axis=0)
best3metric = best3metric.drop_duplicates().head(3)
n_models = best3metric.shape[0]
if n_models == 3:
ensemble_models = {}
for index, row in best3metric.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3m_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'BestN',
'model_count': n_models,
'model_metric': 'mixed_metric',
'models': ensemble_models,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3m_params = pd.DataFrame(best3m_params, index=[0])
ensemble_templates = pd.concat([ensemble_templates, best3m_params], axis=0)
# best 3, all must be of different model types
ens_temp = (
ens_temp.sort_values('Score', ascending=True, na_position='last')
.groupby('Model')
.head(1)
.reset_index(drop=True)
)
best3unique = ens_temp.nsmallest(3, columns=['Score'])
# only run if there are more than 3 model types available...
n_models = best3unique.shape[0]
if n_models == 3:
ensemble_models = {}
for index, row in best3unique.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3u_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'BestN',
'model_count': n_models,
'model_metric': 'best_score_unique',
'models': ensemble_models,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3u_params = pd.DataFrame(best3u_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
if 'distance' in ensemble:
dis_frac = 0.2
first_bit = int(np.ceil(forecast_length * dis_frac))
last_bit = int(np.floor(forecast_length * (1 - dis_frac)))
not_ens_list = initial_results.model_results[
initial_results.model_results['Ensemble'] == 0
]['ID'].tolist()
ens_per_ts = initial_results.per_timestamp_smape[
initial_results.per_timestamp_smape.index.isin(not_ens_list)
]
first_model = ens_per_ts.iloc[:, 0:first_bit].mean(axis=1).idxmin()
last_model = (
ens_per_ts.iloc[:, first_bit : (last_bit + first_bit)].mean(axis=1).idxmin()
)
ensemble_models = {}
best3 = initial_results.model_results[
initial_results.model_results['ID'].isin([first_model, last_model])
].drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
for index, row in best3.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3u_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'Dist',
'model_count': 2,
'model_metric': 'smape',
'models': ensemble_models,
'dis_frac': dis_frac,
'FirstModel': first_model,
'SecondModel': last_model,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3u_params = pd.DataFrame(best3u_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
dis_frac = 0.5
first_bit = int(np.ceil(forecast_length * dis_frac))
last_bit = int(np.floor(forecast_length * (1 - dis_frac)))
not_ens_list = initial_results.model_results[
initial_results.model_results['Ensemble'] == 0
]['ID'].tolist()
ens_per_ts = initial_results.per_timestamp_smape[
initial_results.per_timestamp_smape.index.isin(not_ens_list)
]
first_model = ens_per_ts.iloc[:, 0:first_bit].mean(axis=1).idxmin()
last_model = (
ens_per_ts.iloc[:, first_bit : (last_bit + first_bit)].mean(axis=1).idxmin()
)
ensemble_models = {}
best3 = initial_results.model_results[
initial_results.model_results['ID'].isin([first_model, last_model])
].drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
for index, row in best3.iterrows():
temp_dict = {
'Model': row['Model'],
'ModelParameters': row['ModelParameters'],
'TransformationParameters': row['TransformationParameters'],
}
ensemble_models[row['ID']] = temp_dict
best3u_params = {
'Model': 'Ensemble',
'ModelParameters': json.dumps(
{
'model_name': 'Dist',
'model_count': 2,
'model_metric': 'smape',
'models': ensemble_models,
'dis_frac': dis_frac,
'FirstModel': first_model,
'SecondModel': last_model,
}
),
'TransformationParameters': '{}',
'Ensemble': 1,
}
best3u_params = pd.DataFrame(best3u_params, index=[0])
ensemble_templates = pd.concat(
[ensemble_templates, best3u_params], axis=0, ignore_index=True
)
if ('horizontal' in ensemble) or ('probabilistic' in ensemble):
# per_series = model.initial_results.per_series_mae.copy()
if 'horizontal' in ensemble:
per_series = initial_results.per_series_mae.copy()
elif 'probabilistic' in ensemble:
per_series = initial_results.per_series_spl.copy()
mods = | pd.Series() | pandas.Series |
# %%
import pandas as pd
import numpy as np
import pathlib
import matplotlib
import matplotlib.pyplot as plt
from our_plot_config import derived_dir, fig_dir, raw_dir, setplotstyle
# Call function that sets the plot style
setplotstyle()
# %%
# Input file
f_betas = derived_dir / '13f_sp500_unfiltered.parquet'
f_scraped = derived_dir / '13f_scraped.parquet'
# Figures
f_numowners = fig_dir / 'appfigure_a1.pdf'
fig_mgrs = fig_dir / 'figure3_nmgrs.pdf'
fig_nfirms = fig_dir / 'figure2_nfirms.pdf'
fig_ownership = fig_dir / 'figure4_inst_share.pdf'
# ### Read in the (Cleaned) Parquet Files
# - One for TR $\beta$
# - One for scraped $\beta$
# %%
df = | pd.read_parquet(f_betas) | pandas.read_parquet |
import collections
import copy
import hashlib
import json
import os
import pickle
import pandas as pd
import random
import time
from collections import defaultdict
from os.path import join
from shutil import rmtree
import numpy as np
import torch
import yaml
from data_helper import Task
dir_path = os.path.dirname(os.path.realpath(__file__))
def load_graph(input_edge_file,
projection_origin=defaultdict(lambda: defaultdict(set)),
reverse_origin=defaultdict(lambda: defaultdict(set))):
projections = copy.deepcopy(projection_origin)
reverse = copy.deepcopy(reverse_origin)
with open(input_edge_file, 'r', errors='ignore') as infile:
for line in infile.readlines():
e1, r, e2 = line.strip().split('\t')
e1 = int(e1)
e2 = int(e2)
r = int(r)
projections[e1][r].add(e2)
reverse[e2][r].add(e1)
return projections, reverse
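# Editor's note (not part of the original source): load_graph expects one
# tab-separated (head, relation, tail) integer triple per line, e.g. a line
# "0\t3\t7" yields projections[0][3] == {7} and reverse[7][3] == {0}.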
def read_indexing(data_path):
ent2id = pickle.load(
open(os.path.join(data_path, "ent2id.pkl"), 'rb'))
rel2id = pickle.load(
open(os.path.join(data_path, "rel2id.pkl"), 'rb'))
id2ent = pickle.load(
open(os.path.join(data_path, "id2ent.pkl"), 'rb'))
id2rel = pickle.load(
open(os.path.join(data_path, "id2rel.pkl"), 'rb'))
return ent2id, rel2id, id2ent, id2rel
def list2tuple(l):
return tuple(list2tuple(x) if type(x) == list else x for x in l)
def tuple2list(t):
return list(tuple2list(x) if type(x) == tuple else x for x in t)
def flatten(l): return sum(map(flatten, l), []) if isinstance(l, tuple) else [l]
def parse_time():
return time.strftime("%Y.%m.%d-%H:%M:%S", time.localtime())
def set_global_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def eval_tuple(arg_return):
"""Evaluate a tuple string into a tuple."""
if type(arg_return) == tuple:
return arg_return
if arg_return[0] not in ["(", "["]:
arg_return = eval(arg_return)
else:
splitted = arg_return[1:-1].split(",")
List = []
for item in splitted:
try:
item = eval(item)
except:
pass
if item == "":
continue
List.append(item)
arg_return = tuple(List)
return arg_return
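# Editor's note (not part of the original source): eval_tuple accepts either
# a real tuple (returned unchanged) or its string form, e.g. from a config or
# CLI argument:
#   eval_tuple("(800, 2)")   -> (800, 2)
#   eval_tuple("[0.1, 0.2]") -> (0.1, 0.2)   (always returns a tuple)
#   eval_tuple((800, 2))     -> (800, 2)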
def flatten_query(queries):
all_queries = []
for query_structure in queries:
tmp_queries = list(queries[query_structure])
all_queries.extend([(query, query_structure) for query in tmp_queries])
return all_queries
class Writer:
_log_path = join(dir_path, 'log')
def __init__(self, case_name, config, log_path=None, postfix=True, tb_writer=None):
if isinstance(config, dict):
self.meta = config
else:
self.meta = vars(config)
self.time = time.time()
self.meta['time'] = self.time
self.idstr = case_name
self.column_name = {}
if postfix:
self.idstr += time.strftime("%y%m%d.%H:%M:%S", time.localtime()) + \
hashlib.sha1(str(self.meta).encode('UTF-8')).hexdigest()[:8]
self.log_path = log_path if log_path else self._log_path
if os.path.exists(self.case_dir):
rmtree(self.case_dir)
os.makedirs(self.case_dir, exist_ok=False)
with open(self.metaf, 'wt') as f:
json.dump(self.meta, f)
def append_trace(self, trace_name, data):
if trace_name not in self.column_name:
self.column_name[trace_name] = list(data.keys())
assert len(self.column_name[trace_name]) > 0
if not os.path.exists(self.tracef(trace_name)):
with open(self.tracef(trace_name), 'at') as f:
f.write(','.join(self.column_name[trace_name]) + '\n')
with open(self.tracef(trace_name), 'at') as f:
f.write(','.join([str(data[c]) for c in self.column_name[trace_name]]) + '\n')
def save_pickle(self, obj, name):
with open(join(self.case_dir, name), 'wb') as f:
pickle.dump(obj, f)
def save_array(self, arr, name):
np.save(join(self.case_dir, name), arr)
def save_json(self, obj, name):
if not name.endswith('json'):
name += '.json'
with open(join(self.case_dir, name), 'wt') as f:
json.dump(obj, f)
def save_dataframe(self, obj, name):
if not name.endswith('csv'):
name += '.csv'
df = | pd.DataFrame.from_dict(data=obj) | pandas.DataFrame.from_dict |
"""Authors: Salah&Yassir"""
import functools
import numpy as np
import pandas as pd
import pickle as pk
import ABONO as abono
# put in xs the list of columns you want to apply the function to
xs = ['eeg_{i}'.format(i=i) for i in range(0, 2000)]
# define the function:
def f(objs):
s = 0
for x in xs:
v = objs[x]
# do the processing on the values here
s += v
return s
p8 = lambda x: (lambda objs: objs[x] ** 8)
mapper = {
'eeg_sum': f  # name of the new column : function
}
for x in xs:
mapper[x] = p8(x)
newcols = list(mapper.keys())
dropcols = ['eeg_{i}'.format(i=i) for i in range(0, 2000)] + \
['respiration_x_{i}'.format(i=i) for i in range(0, 400)] + \
['respiration_y_{i}'.format(i=i) for i in range(0, 400)] + \
['respiration_z_{i}'.format(i=i) for i in range(0, 400)] + \
['user', 'night']
with abono.Session() as s: #Debug is true
prr = 'data/171212-161438/train.csv'
prrr = 'data/train.csv'
prr2 = 'data/171212-170858/test.csv'
mm = 'data/171212-161438/model.dat'
s.init_train()
s.init_model()
s.init_test()
pr = abono.Processer(s, newcols, mapper, dropcols)
with open(mm, 'rb') as ff:
model = pk.load(ff)
m = abono.model(pr, s, offset=0, length=None, model='en')#, model=model)
@abono.timed(s)
def main():
return m.run(cross_validate=True, processed_train_data=prrr)#, processed_test_data=prr2) # you can add the processed train set path here
rslt = main()
if type(rslt) == np.float64:
s.log('MSE: {mse}'.format(mse=rslt**0.5), rslts=True)
else:
s.log(rslt[1]**0.5)
| pd.DataFrame(rslt[0]) | pandas.DataFrame |
import os
import shutil
import time
import numpy as np
import pandas as pd
from jina import Document, DocumentArray, Flow
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from executor.executor import AnnLiteIndexer
Nq = 1
D = 128
top_k = 10
R = 5
n_cells = 64
n_subvectors = 64
n_queries = 1
BENCHMARK_SIMPLEINDEXER = False
BENCHMARK_ANNLITE = True
def _precision(predicted, relevant, eval_at):
"""
fraction of retrieved documents that are relevant to the query
"""
if eval_at == 0:
return 0.0
predicted_at_k = predicted[:eval_at]
n_predicted_and_relevant = len(set(predicted_at_k).intersection(set(relevant)))
return n_predicted_and_relevant / len(predicted)
def _recall(predicted, relevant, eval_at):
"""
fraction of the relevant documents that are successfully retrieved
"""
if eval_at == 0:
return 0.0
predicted_at_k = predicted[:eval_at]
n_predicted_and_relevant = len(set(predicted_at_k).intersection(set(relevant)))
return n_predicted_and_relevant / len(relevant)
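# Editor's note (not part of the original source): e.g.
# _recall(predicted=[3, 1, 2, 9, 7], relevant=[1, 2, 5], eval_at=3) keeps the
# top-3 predictions [3, 1, 2], finds 2 of the 3 relevant ids, and returns 2/3.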
def evaluate(predicts, relevants, eval_at):
recall = 0
precision = 0
for _predict, _relevant in zip(predicts, relevants):
_predict = np.array([int(x) for x in _predict])
recall += _recall(_predict, _relevant, top_k)
precision += _precision(_predict, _relevant, top_k)
return recall / len(predicts), precision / len(predicts)
def clean_workspace():
if os.path.exists('./SimpleIndexer'):
shutil.rmtree('./SimpleIndexer')
if os.path.exists('./workspace'):
shutil.rmtree('./workspace')
def create_data(n_examples, D):
np.random.seed(123)
Xtr, Xte = train_test_split(
make_blobs(n_samples=n_examples, n_features=D)[0].astype(np.float32),
test_size=1,
)
return Xtr, Xte
def create_data_online(n_examples, D, batch_size):
np.random.seed(123)
num = 0
while True:
Xtr_batch = make_blobs(n_samples=batch_size, n_features=D)[0].astype(np.float32)
yield DocumentArray([Document(embedding=x) for x in Xtr_batch])
num += batch_size
if num + batch_size >= n_examples:
break
if num < n_examples:
Xtr_batch = make_blobs(n_samples=n_examples - num, n_features=D)[0].astype(
np.float32
)
yield DocumentArray([Document(embedding=x) for x in Xtr_batch])
def create_test_data(D, Nq):
np.random.seed(123)
Xte = make_blobs(n_samples=Nq, n_features=D)[0].astype(np.float32)
return DocumentArray([Document(embedding=x) for x in Xte])
if BENCHMARK_SIMPLEINDEXER:
################ SimpleIndexer Benchmark BEGIN #################
n_datasets = [10001, 50001, 200001, 400001]
times = []
for n_examples in n_datasets:
time_taken = 0
clean_workspace()
Xtr, Xte = create_data(n_examples, D)
f = Flow().add(
uses='jinahub://SimpleIndexer',
uses_with={'match_args': {'metric': 'euclidean', 'limit': 10}},
)
docs = [Document(id=f'{i}', embedding=Xtr[i]) for i in range(len(Xtr))]
with f:
resp = f.post(
on='/index',
inputs=docs,
)
with f:
t0 = time.time()
resp = f.post(
on='/search',
inputs=DocumentArray([Document(embedding=Xte[0])]),
return_results=True,
)
time_taken = time.time() - t0
times.append(time_taken)
df = pd.DataFrame({'n_examples': n_datasets, 'times': times})
df.to_csv('simpleindexer.csv')
print(df)
clean_workspace()
################ SimpleIndexer Benchmark END #################
if BENCHMARK_ANNLITE:
################ AnnLite Benchmark BEGIN ######################
n_datasets = [10_000, 100_000, 500_000, 1_000_000, 10_000_000]
# n_datasets = [10_000, 100_000]
n_queries = [1, 8, 64]
batch_size = 4096
times = []
metas = {'workspace': './workspace'}
results = {}
for n_examples in n_datasets:
print(f'\n\nWorking with n_examples={n_examples}\n\n')
time_taken = 0
clean_workspace()
f = Flow().add(
uses=AnnLiteIndexer,
uses_with={
'dim': D,
'limit': 10,
},
uses_metas=metas,
)
# docs = [Document(id=f'{i}', embedding=Xtr[i]) for i in range(len(Xtr))]
docs = create_data_online(n_examples, D, batch_size)
results_current = {}
with f:
time_taken = 0
for batch in docs:
t0 = time.time()
resp = f.post(on='/index', inputs=batch, request_size=10240)
# This is done to keep data-batch creation time out of the measured index time
time_taken += time.time() - t0
results_current['index_time'] = time_taken
times_per_n_query = []
with f:
for n_query in n_queries:
da_queries = create_test_data(D, n_query)
t_qs = []
for _ in range(R):
t0 = time.time()
resp = f.post(
on='/search',
inputs=da_queries,
return_results=True,
)
time_taken = time.time() - t0
t_qs.append(time_taken)
# remove warm-up
times_per_n_query.append(np.mean(t_qs[1:]))
results_current['query_times'] = times_per_n_query
print(f'==> query_times: {times_per_n_query}')
df = | pd.DataFrame({'results': results_current}) | pandas.DataFrame |
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from pyspark.ml.classification import RandomForestClassifier as RFCSpark
from xgboost import XGBClassifier
from gators.feature_selection.select_from_model import SelectFromModel
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
X = pd.DataFrame(
{
"A": [22.0, 38.0, 26.0, 35.0, 35.0, 28.11, 54.0, 2.0, 27.0, 14.0],
"B": [7.25, 71.28, 7.92, 53.1, 8.05, 8.46, 51.86, 21.08, 11.13, 30.07],
"C": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
}
)
y = pd.Series([0, 1, 1, 1, 0, 0, 0, 0, 1, 1], name="TARGET")
X_expected = X[["A", "B"]].copy()
model = XGBClassifier(
random_state=0,
subsample=1.0,
n_estimators=2,
max_depth=2,
eval_metric="logloss",
use_label_encoder=False,
)
obj = SelectFromModel(model=model, k=2).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_ks():
X = ks.DataFrame(
{
"A": [22.0, 38.0, 26.0, 35.0, 35.0, 28.11, 54.0, 2.0, 27.0, 14.0],
"B": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"C": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
}
)
y = ks.Series([0, 1, 1, 1, 0, 0, 0, 0, 1, 1], name="TARGET")
X_expected = X[["A"]].to_pandas().copy()
model = RFCSpark(numTrees=2, maxDepth=1, labelCol=y.name, seed=0)
obj = SelectFromModel(model=model, k=2).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_combined():
X = ks.DataFrame(
{
"A": [22.0, 38.0, 26.0, 35.0, 35.0, 28.11, 54.0, 2.0, 27.0, 14.0],
"B": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"C": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
}
)
y = ks.Series([0, 1, 1, 1, 0, 0, 0, 0, 1, 1], name="TARGET")
X_expected = X[["A"]].to_pandas().copy()
model = XGBClassifier(
random_state=0,
subsample=1.0,
n_estimators=2,
max_depth=2,
eval_metric="logloss",
use_label_encoder=False,
)
obj = SelectFromModel(model=model, k=2).fit(X, y)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
"""
COLLECTION OF FUNCTIONS FOR PROTEIN SEQUENCE FEATURE CONSTRUCTION & BLAST PREDICTION
Created on Thu Nov 9 13:29:44 2017
@author: dimiboeckaerts
Some of the code below is taken from the following Github repo:
https://github.com/Superzchen/iFeature
(Chen et al., 2018. Bioinformatics.)
"""
# IMPORT LIBRARIES
# --------------------------------------------------
import os
import math
import warnings
import numpy as np
import scipy as sp
import datetime as dt
from numba import jit
import matplotlib.pyplot as plt
from Bio.Blast import NCBIWWW, NCBIXML
from Bio import SeqIO, Entrez, pairwise2
from Bio.SubsMat import MatrixInfo as matlist
from sklearn.preprocessing import label_binarize
from sklearn.exceptions import UndefinedMetricWarning
from Bio.Blast.Applications import NcbiblastpCommandline
from sklearn.model_selection import GridSearchCV, GroupKFold
from sklearn.metrics import accuracy_score, make_scorer, f1_score, precision_score, recall_score
from sklearn.metrics import precision_recall_curve, auc
# DNA FEATURES
# --------------------------------------------------
def dna_features(dna_sequences):
"""
This function calculates a variety of properties from a DNA sequence.
Input: a list of DNA sequences (can also be of length 1)
Output: a dataframe of features
"""
import numpy as np
import pandas as pd
from Bio.SeqUtils import GC, CodonUsage
A_freq = []; T_freq = []; C_freq = []; G_freq = []; GC_content = []
codontable = {'ATA':[], 'ATC':[], 'ATT':[], 'ATG':[], 'ACA':[], 'ACC':[], 'ACG':[], 'ACT':[],
'AAC':[], 'AAT':[], 'AAA':[], 'AAG':[], 'AGC':[], 'AGT':[], 'AGA':[], 'AGG':[],
'CTA':[], 'CTC':[], 'CTG':[], 'CTT':[], 'CCA':[], 'CCC':[], 'CCG':[], 'CCT':[],
'CAC':[], 'CAT':[], 'CAA':[], 'CAG':[], 'CGA':[], 'CGC':[], 'CGG':[], 'CGT':[],
'GTA':[], 'GTC':[], 'GTG':[], 'GTT':[], 'GCA':[], 'GCC':[], 'GCG':[], 'GCT':[],
'GAC':[], 'GAT':[], 'GAA':[], 'GAG':[], 'GGA':[], 'GGC':[], 'GGG':[], 'GGT':[],
'TCA':[], 'TCC':[], 'TCG':[], 'TCT':[], 'TTC':[], 'TTT':[], 'TTA':[], 'TTG':[],
'TAC':[], 'TAT':[], 'TAA':[], 'TAG':[], 'TGC':[], 'TGT':[], 'TGA':[], 'TGG':[]}
for item in dna_sequences:
# nucleotide frequencies
A_freq.append(item.count('A')/len(item))
T_freq.append(item.count('T')/len(item))
C_freq.append(item.count('C')/len(item))
G_freq.append(item.count('G')/len(item))
# GC content
GC_content.append(GC(item))
# codon frequency: count codons, normalize counts, add to dict
codons = [item[i:i+3] for i in range(0, len(item), 3)]
l = []
for key in codontable.keys():
l.append(codons.count(key))
l_norm = [float(i)/sum(l) for i in l]
for j, key in enumerate(codontable.keys()):
codontable[key].append(l_norm[j])
# codon usage bias (_b)
synonym_codons = CodonUsage.SynonymousCodons
codontable2 = {'ATA_b':[], 'ATC_b':[], 'ATT_b':[], 'ATG_b':[], 'ACA_b':[], 'ACC_b':[], 'ACG_b':[], 'ACT_b':[],
'AAC_b':[], 'AAT_b':[], 'AAA_b':[], 'AAG_b':[], 'AGC_b':[], 'AGT_b':[], 'AGA_b':[], 'AGG_b':[],
'CTA_b':[], 'CTC_b':[], 'CTG_b':[], 'CTT_b':[], 'CCA_b':[], 'CCC_b':[], 'CCG_b':[], 'CCT_b':[],
'CAC_b':[], 'CAT_b':[], 'CAA_b':[], 'CAG_b':[], 'CGA_b':[], 'CGC_b':[], 'CGG_b':[], 'CGT_b':[],
'GTA_b':[], 'GTC_b':[], 'GTG_b':[], 'GTT_b':[], 'GCA_b':[], 'GCC_b':[], 'GCG_b':[], 'GCT_b':[],
'GAC_b':[], 'GAT_b':[], 'GAA_b':[], 'GAG_b':[], 'GGA_b':[], 'GGC_b':[], 'GGG_b':[], 'GGT_b':[],
'TCA_b':[], 'TCC_b':[], 'TCG_b':[], 'TCT_b':[], 'TTC_b':[], 'TTT_b':[], 'TTA_b':[], 'TTG_b':[],
'TAC_b':[], 'TAT_b':[], 'TAA_b':[], 'TAG_b':[], 'TGC_b':[], 'TGT_b':[], 'TGA_b':[], 'TGG_b':[]}
for item1 in dna_sequences:
codons = [item1[l:l+3] for l in range(0, len(item1), 3)]
codon_counts = []
# count codons corresponding to codontable (not codontable2 because keynames changed!)
for key in codontable.keys():
codon_counts.append(codons.count(key))
# count total for synonymous codons, divide each synonym codon count by total
for key_syn in synonym_codons.keys():
total = 0
for item2 in synonym_codons[key_syn]:
total += codons.count(item2)
for j, key_table in enumerate(codontable.keys()):
if (key_table in synonym_codons[key_syn]) & (total != 0):
codon_counts[j] /= total
# add corrected counts to codontable2 (also corresponds to codontable which was used to count codons)
for k, key_table in enumerate(codontable2.keys()):
codontable2[key_table].append(codon_counts[k])
# make new dataframes & standardize
features_codonbias = pd.DataFrame.from_dict(codontable2)
features_dna = | pd.DataFrame.from_dict(codontable) | pandas.DataFrame.from_dict |
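# Editor's note (not part of the original source): an example of the
# codon-usage-bias normalisation above -- for the six leucine codons
# (CTT, CTC, CTA, CTG, TTA, TTG), raw counts {CTG: 6, TTA: 2, others: 0}
# become {CTG_b: 0.75, TTA_b: 0.25, others: 0} after dividing by the
# leucine-codon total of 8.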
import os
import json
import datetime
import argparse
import pandas as pd
import config, utils
import inf_outf
def bitcoin_data():
"Gets the OHLCV for Bitcoin over the period"
pair = "xbtusd_bitmex" # the pair we want to look at
url = "https://web3api.io/api/v2/market/ohlcv/"+pair+"/historical"
# querystring with the correct formatting
querystring = {
"exchange":"bitmex",
"startDate":config.startTime,
"endDate":config.endTime,
"timeInterval":"days"
}
# add our api key
headers = {'x-api-key': config.api_key}
# get the result from REST API
payload = utils.get_response(url, headers=headers, queryString=querystring)
# Format into the desired format
ohlcv = | pd.DataFrame(payload["data"]["bitmex"], columns=payload["metadata"]["columns"]) | pandas.DataFrame |
from operator import mul
import sys
import matplotlib.pyplot as plt
import numpy as np
from holoviews import opts
from scipy.signal.ltisys import dfreqresp
from scipy.spatial import Voronoi
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
import pandas as pd
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual, Text, interactive_output
from ipywidgets import Button, HBox, VBox,Layout,Label
import panel as pn
import seaborn as sns
from kneed import KneeLocator
from PatientGraphPheno import *
from patientKG.config.bedrock_connection import *
from patientKG import utils_pickle  # both utils_pickle.read and patientKG.utils_pickle.read are used below
import patientKG.utils_pickle
from holoviews.operation.datashader import datashade, bundle_graph
import holoviews as hv
from holoviews import opts
from datetime import datetime
import re
import plotly.graph_objects as go
from pivottablejs import pivot_ui
from IPython.display import display, HTML
from sklearn.feature_selection import VarianceThreshold
from sklearn import preprocessing
import urllib, json
sns.set(style="ticks")
hv.extension('bokeh')
defaults = dict(width=1000, height=1000, padding=0.1)
from patientKG.tests.test_graphs import *
from ipywidgets import TwoByTwoLayout
import itertools
import time
from IPython.display import IFrame
import json, io
from patientKG.priorKnowledge.Hb1AC import *
from patientKG.priorKnowledge.Albumin import *
from patientKG.priorKnowledge.FBC import *
from patientKG.priorKnowledge.Creactive import *
from scipy.stats import chi2_contingency
import scipy.stats as stats
def show_SpellHRG_HRG_Table(HRG,Degree,Readmit):
Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
return Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == HRG)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
#The block below is for Jupyter-Notebook
"""stats = interact(PatientGraphVisuExplore.show_SpellHRG_HRG_Table,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Income_Scatter(HRG,Degree,Readmit):
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == HRG)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
plt.scatter(data['Sum_Degree'], data['INCOME'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Income_Scatter,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_LOS_Scatter(HRG,Degree,Readmit):
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
plt.scatter(data['Sum_Degree'], data['Total_LOS'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_LOS_Scatter,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Pairplot(HRG,Degree,Readmit):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
sns.pairplot(df[df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE','ReAdmitted in DAYS'])], hue="SpellHRG")
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Pairplot,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_ICD(HRG,ICD,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
if ICD == None:
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
else:
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellPDiag'] == ICD)&(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME']#,'Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
size = {1:2,2:2,3:2,4:2}
colors = list(map(lambda x: colmap[x+1], labels))
markers = list(map(lambda x: marker[x+1], labels))
sizes = list(map(lambda x: size[x+1], labels))
#print(principalComponents)
df['principal component 1'] = principalComponents[:,0]
df['principal component 2'] = principalComponents[:,1]
df['color'] = colors
df['marker'] = markers
df['sizes'] = sizes
opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
data ={'x':list(df['principal component 1'])
,'y':list(df['principal component 2'])
,'color':list(df['color'])
,'marker':list(df['marker'])
,'sizes':list(df['sizes'])}
#hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
#for idx, centroid in enumerate(centroids):
#plt.scatter(*centroid, color=colmap[idx+1])
df['labels'] = labels
#print(list(df['labels'].unique()))
shape_ = {}
for item in list(df['labels'].unique()):
shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
#print(shape_)
#print(sorted(shape_.items(), key=lambda x: x[1]))
minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
#sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
#for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
label = label
plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
test=zip(regions, df['color'])
for item in test:
polygon = vertices[item[0]]
#print(region,polygon)
#print(*zip(*polygon))
plt.fill(*zip(*polygon), alpha=0.4
,color=item[1]
)
plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
#df.loc[(df['POD_CODE'] == POD)]
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','age','sex','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','SpellPDiag','SpellSDiag']]#,'ALL_DIAG']]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_ICD,
HRG=widgets.SelectMultiple(
options=
init_code,
#list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
),
ICD=widgets.Dropdown(
options=
#init_code,
sorted(list(Degree_HRG['SpellPDiag'].dropna().unique())),value=None
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_ICD(ICD,Degree,Age,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in ICD:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellPDiag'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['age'].astype(int)>=Age[0])&(Degree_ReAdmitted_HRG['age'].astype(int) <=Age[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME']#,'Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
size = {1:2,2:2,3:2,4:2}
colors = list(map(lambda x: colmap[x+1], labels))
markers = list(map(lambda x: marker[x+1], labels))
sizes = list(map(lambda x: size[x+1], labels))
#print(principalComponents)
df['principal component 1'] = principalComponents[:,0]
df['principal component 2'] = principalComponents[:,1]
df['color'] = colors
df['marker'] = markers
df['sizes'] = sizes
opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
data ={'x':list(df['principal component 1'])
,'y':list(df['principal component 2'])
,'color':list(df['color'])
,'marker':list(df['marker'])
,'sizes':list(df['sizes'])}
#hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
#for idx, centroid in enumerate(centroids):
#plt.scatter(*centroid, color=colmap[idx+1])
df['labels'] = labels
#print(list(df['labels'].unique()))
shape_ = {}
for item in list(df['labels'].unique()):
shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
#print(shape_)
#print(sorted(shape_.items(), key=lambda x: x[1]))
minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
#sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
#for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
label = label
plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
test=zip(regions, df['color'])
for item in test:
polygon = vertices[item[0]]
#print(region,polygon)
#print(*zip(*polygon))
plt.fill(*zip(*polygon), alpha=0.4
,color=item[1]
)
plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
#df.loc[(df['POD_CODE'] == POD)]
return df[(df['POD_CODE'] == POD)][['age','sex','SpellHRG']+features+ ['POD_CODE','SpellPDiag','SpellSDiag']]#,'ALL_DIAG']]
#This block is for Jupyter-Notebook script
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_ICD,
ICD=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellPDiag'].dropna().unique()),
value=['A415'],
#rows=10,
description='ICD',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Age=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=100,
step=1,
description='Age:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')) """
def plot_SpellHRG_HRG(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: 'g', 2: 'r', 3: 'b', 4:'y'}
marker = {1:'circle', 2:'diamond', 3:'dot', 4:'triangle'}
size = {1:2,2:2,3:2,4:2}
colors = list(map(lambda x: colmap[x+1], labels))
markers = list(map(lambda x: marker[x+1], labels))
sizes = list(map(lambda x: size[x+1], labels))
#print(principalComponents)
df['principal component 1'] = principalComponents[:,0]
df['principal component 2'] = principalComponents[:,1]
df['color'] = colors
df['marker'] = markers
df['sizes'] = sizes
opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
data ={'x':list(df['principal component 1'])
,'y':list(df['principal component 2'])
,'color':list(df['color'])
,'marker':list(df['marker'])
,'sizes':list(df['sizes'])}
#hv.Points(data, vdims=['color', 'marker', 'sizes']).opts(color='color', marker='marker', size='sizes')
plt.scatter(df['principal component 1'], df['principal component 2'], color=colors, alpha=0.5, edgecolor='k')
#for idx, centroid in enumerate(centroids):
#plt.scatter(*centroid, color=colmap[idx+1])
df['labels'] = labels
#print(list(df['labels'].unique()))
shape_ = {}
for item in list(df['labels'].unique()):
shape_.update({item:[(df[df['labels'] ==item].shape[0]),df[df['labels'] == item]['Sum_Degree'].mean()]})
print('Complex Degree:',df[df['labels'] == item]['Sum_Degree'].mean())
#print(shape_)
#print(sorted(shape_.items(), key=lambda x: x[1]))
minor_=sorted(shape_.items(), key=lambda x: x[1])[0][0]
major_=sorted(shape_.items(), key=lambda x: x[1])[1][0]
#sns.pairplot(df[df['labels'] ==1][df.columns.difference(['ACTIVITY_IDENTIFIER','POD_CODE'])], hue="SpellHRG")
#for label,x,y in zip(df[df['labels'] == minor_]['ACTIVITY_IDENTIFIER'],df[df['labels'] == minor_]['principal component 1'],df[df['labels'] == minor_]['principal component 2']):
for label,x,y in zip(df['ACTIVITY_IDENTIFIER'],df['principal component 1'],df['principal component 2']):
plt.annotate(label, (x,y),textcoords="offset points",xytext=(0,10),ha='center', size =20)
test=zip(regions, df['color'])
for item in test:
polygon = vertices[item[0]]
#print(region,polygon)
#print(*zip(*polygon))
plt.fill(*zip(*polygon), alpha=0.4
,color=item[1]
)
plt.xlim(vor.min_bound[0]-0.1, vor.max_bound[0]+0.1)
plt.ylim(vor.min_bound[1]-0.1, vor.max_bound[1]+0.1)
print('Minor Complex Degree:',df[df['labels'] == minor_]['Sum_Degree'].mean())
print('Major Complex Degree:',df[df['labels'] == major_]['Sum_Degree'].mean())
#df.loc[(df['POD_CODE'] == POD)]
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG']]
#The below block is for Jupyter-Notebook
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG,
HRG=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_Degree(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
Degree_ReAdmitted_HRG = Degree_ReAdmitted_HRG[Degree_ReAdmitted_HRG['SpellHRG'].notna()]
Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] = Degree_ReAdmitted_HRG['ReAdmitted in DAYS'].fillna(-1)
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
principalComponents = sliced_principle_components(df,features,2)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
kmax = 10
best_n = best_eblow_k(principalDf.values.tolist(),kmax = 10)
df = plot_vor(df,principalComponents, best_n)
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_Degree,
HRG=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_Degree_PairCompare(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("../Degree_ReAdmitted_HRG")
Degree_ReAdmitted_HRG = Degree_ReAdmitted_HRG[Degree_ReAdmitted_HRG['SpellHRG'].notna()]
Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] = Degree_ReAdmitted_HRG['ReAdmitted in DAYS'].fillna(-1)
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
principalComponents = sliced_principle_components(df,features,2)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
kmax = 10
best_n = best_eblow_k(principalDf.values.tolist(),kmax = 10)
df = plot_vor(df,principalComponents, best_n)
features.append('labels')
sns.pairplot(df[features], hue="labels", diag_kws={'bw':'1.0'})
#df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
return df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
def multi_table(df, title, addAll=True):
fig = go.Figure()
fig.add_trace(go.Table(
header=dict(values=list(df.columns)),
cells=dict(values=df.transpose().values.tolist()
)
))
button_all = dict(label='All',
method='update',
args=[{'visible':df['labels'].isin(list(df['labels'].unique())),'title':'All','showlegend':True}])
def create_layout_button(column):
return dict(label=column, method='update', args=[{'visible':df['labels'].isin([column]),'title':column,'showlegend':True}])
fig.update_layout(updatemenus=[go.layout.Updatemenu(active=0, buttons=([button_all]*addAll)+list(df['labels'].map(lambda column:create_layout_button(column))))],yaxis_type="log")
fig.show()
return
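# Hedged usage sketch for multi_table (not part of the original notebook): the frame below is
# hypothetical; the function only assumes a 'labels' column plus the columns to display, and
# builds one plotly Table with a per-label button menu.
"""
import pandas as pd

example_df = pd.DataFrame({
    'labels': [0, 0, 1, 1],
    'Total_LOS': [10, 12, 30, 28],
    'INCOME': [100, 120, 300, 280],
})
multi_table(example_df, title='Cluster summary')
"""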
def sankey(df):
labels = ['Total']
source=[]
target=[]
value=[]
color = ['#57c19c']
color_map={'g':'green','r':'red','y':'yellow','b':'blue'}
total = df['activity_identifier'].nunique()
grouped = pd.DataFrame(df.groupby(['labels','Label','color'])['activity_identifier'].nunique()).reset_index()
for item in sorted(df['labels'].unique()):
labels.append(str(item))
source.append(0)
target.append(labels.index(str(item)))
value.append(grouped[grouped['labels']==item]['activity_identifier'].sum())
color.append(str(color_map[grouped[grouped['labels']==item]['color'].unique()[0]]))
for item in sorted(df['labels'].unique()):
for item2 in sorted(df['Label'].unique()):
try:
num = int(grouped[(grouped['labels']==item)&(grouped['Label']==item2)]['activity_identifier'])
labels.append(str(item+"_"+item2))
except:
continue
color.append('black')
color.append('pink')
for index,row in grouped.iterrows():
source_label, target_label,value_ = row['labels'], row['Label'],row['activity_identifier']
source.append(labels.index(str(source_label)))
target.append(labels.index(str(source_label+"_"+target_label)))
value.append(value_)
percentage_node = ["{:.2f}".format(total/total*100)+"%"]
diff = list(set(source)-set([0]))
i=0
cn =0
while i < len(source):
if source[i] == 0:
percentage_node.append("{:.2f}".format(value[i]/total*100)+"%")
cn+=1
i+=1
while cn < len(source):
percentage_node.append("{:.2f}".format(value[cn]/value[target.index(source[cn])]*100)+"%")
cn+=1
percentage_link = ["{:.2f}".format(total/total*100)+"%", "60%", "70%", "60%", "100%"]
fig = go.Figure(data=[go.Sankey(
node = dict(
pad = 15,
thickness = 20,
line = dict(color = "black", width = 0.5),
label = labels,
customdata = percentage_node,
hovertemplate='%{label}: %{value}(%{customdata})<extra></extra>',
color = color
),
link = dict(
source = source, # indices correspond to labels, eg A1, A2, A1, B1, ...
target = target,
value = value,
#customdata = percentage_link,
#hovertemplate='Link from %{source.label}<br />'+
#'to %{target.label}<br /> %{value}(%{customdata})'+
#'<extra></extra>',
))])
#
return fig.update_layout(title_text="Cluster via Outcome", font_size=10)#labels, source, target, value
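# Hedged usage sketch for sankey (hypothetical data, not from the pipeline): it assumes the frame
# carries 'activity_identifier', a cluster id in 'labels' (as strings, since they are concatenated
# with the outcome), an outcome in 'Label', and a 'color' code from {'g','r','y','b'}.
"""
import pandas as pd

example_df = pd.DataFrame({
    'activity_identifier': [1, 2, 3, 4],
    'labels': ['0', '0', '1', '1'],
    'Label': ['Diagnosed_PU', 'No-diagnose', 'No-diagnose', 'No-diagnose'],
    'color': ['g', 'g', 'r', 'r'],
})
fig = sankey(example_df)
fig.show()
"""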
def plot_Spell_PU_Degree_PairCompare(Label, Expected_LOS,selected_list,Age,Waterlow_Standard,features = ['Complex_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree'] ):
def pop_std(x):
return x.std(ddof=0)
all_modelled_events = ['WardStay,LabTest', 'WardStay',
'WardStay,Waterlow,LabTest,PatientPosition',
'WardStay,Waterlow,LabTest,Skinasses,TV,PatientPosition',
'WardStay,Waterlow,LabTest',
'WardStay,Waterlow,LabTest,Skinasses,PatientPosition',
'WardStay,Waterlow,LabTest,TV,PatientPosition',
'WardStay,Waterlow,Skinasses,PatientPosition',
'WardStay,Waterlow,PatientPosition', 'WardStay,PatientPosition',
'WardStay,LabTest,PatientPosition', 'WardStay,Waterlow',
'WardStay,Skinasses', 'WardStay,Skinasses,PatientPosition',
'WardStay,Waterlow,Skinasses', 'WardStay,LabTest,Skinasses',
'WardStay,LabTest,Skinasses,PatientPosition',
'WardStay,Waterlow,Skinasses,TV,PatientPosition',
'WardStay,LabTest,Skinasses,TV',
'WardStay,Waterlow,TV,PatientPosition',
'WardStay,Waterlow,LabTest,Skinasses',
'WardStay,LabTest,Skinasses,TV,PatientPosition',
'WardStay,LabTest,TV', 'WardStay,LabTest,TV,PatientPosition',
'WardStay,Waterlow,LabTest,TV', 'WardStay,TV,PatientPosition',
'WardStay,Waterlow,TV', 'WardStay,TV', 'WardStay,Skinasses,TV',
'WardStay,Waterlow,LabTest,Skinasses,TV']
selected_list=list(selected_list)
selected_list.append('WardStay')
modelled_events =[]
for item in all_modelled_events:
#print(item.split(','))
#print(selected_list)
if set(item.split(','))==set(selected_list):
modelled_events.append(item)
if len(modelled_events)==0:
print("No Events!")
return
Waterlow_Compliance = list(Waterlow_Standard)
if len(Waterlow_Compliance)==1 and Waterlow_Compliance[0]!='Rule 1: Use Waterlow' and Waterlow_Compliance[0]!='No Waterlow':
return "In RBH we only use Waterlow!"
diction={'Rule 1: Use Waterlow':{'rule 1': 'Pass'}, 'Rule 2: 4 Hours Admission':{'rule 2': 'Pass'}, 'Rule 3: AE 4hours':{'rule 3': 'Pass'}, 'Rule 4: Ward Transfer 4hours':{'rule 4': 'Pass'},'No Waterlow':'No Waterlow'}
waterlow_group=[]
rule_group={}
for index, key in enumerate(diction):
rule_number = index+1
if key != 'No Waterlow':
if key in Waterlow_Compliance:
rule_group.update(diction[key])
else:
rule_group.update({'rule {}'.format(rule_number):'Fail'})
else:
waterlow_group.append(diction[key])
waterlow_group.append(str(rule_group))
df = pd.DataFrame()
Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("PU_RESULT")
#Degree_ReAdmitted_HRG = Degree_ReAdmitted_HRG[Degree_ReAdmitted_HRG['SpellHRG'].notna()]
#Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] = Degree_ReAdmitted_HRG['ReAdmitted in DAYS'].fillna(-1)
# for item in Label:
#print(item)
los_dic= {"Expected Long for HRG":"Normal", "Unexpected Long for HRG":"Abnormal","Unexpected short - live discharge":"Not yet", 'Unknown': 'Unknown'}
LOS_LIST =[]
for item in Expected_LOS:
LOS_LIST.append(los_dic[item])
try:
df=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['Label'].isin(Label))&(Degree_ReAdmitted_HRG['Expected_LOS'].isin(LOS_LIST))& (Degree_ReAdmitted_HRG['modelled_events'].isin(modelled_events))
&(((Degree_ReAdmitted_HRG['HPS_AGE_AT_ADMISSION_DATE']>=Age[0])
&(Degree_ReAdmitted_HRG['HPS_AGE_AT_ADMISSION_DATE'] <=Age[1])))
&(Degree_ReAdmitted_HRG['Waterlow_Standard'].isin(waterlow_group))
#&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))
]
# df = pd.concat([df,data])
except:
return "No Sample!"
#features = ['Sum_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree']
principalComponents,pca_explained,pca_components = sliced_principle_components(df,features,2)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
kmax = 10
best_n = best_eblow_k(principalDf.values.tolist(),kmax = 10)
try:
df = plot_vor(df,principalComponents, best_n)
except:
df = plot(df,principalComponents, best_n)
#print(list(features))
#Section Outcomes to Estimated groups
total = df['activity_identifier'].nunique()
outcomes = df.groupby(['labels','Label'])['activity_identifier'].nunique()
fig = sankey(df)
fig.show()
#Section phenotype table with variables
selector = VarianceThreshold()
x = df[list(features)].values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_x = pd.DataFrame(x_scaled)
selector.fit_transform(df_x)
#print(selector.variances_)
feature_list = []
for item in features:
feature_list.append(item)
for i in range(len(pca_components)):
for item in pca_components[i]:
result_i=[item*pca_explained[i]]
features_score = selector.variances_
feature_rank = pd.DataFrame(list(zip(features, features_score)), columns=['variable_name','score'])
#print(feature_rank)
test3 =pd.DataFrame()
#print(test3)
    for item in df[feature_list].columns:
        sub_df = df[df[item]!=0]
        # dict-style renaming in SeriesGroupBy.agg and DataFrame.append were removed in newer pandas
        test1 = sub_df.groupby(['labels'])[item].mean().to_frame(item).T
        test2 = sub_df.groupby(['labels'])[item].agg(pop_std).to_frame(item).T
        test4 = pd.DataFrame()
        for label_ in sub_df['labels'].unique():
            test4[label_] = test1[label_].round(2).astype(str)+" (\u00B1"+test2[label_].round(2).astype(str)+")"
        test3 = pd.concat([test3, test4])
test3 = test3.reindex(sorted(test3.columns),axis=1)
test3['variable_name'] = test3.index
#print(test3)
test3 =test3.merge(feature_rank, how='left', on='variable_name')
#test3 = test3.set_index('variable_name')
    test5 = df.groupby(['labels'])['activity_identifier'].nunique().to_frame('activity_identifier').T
test3 = test3.sort_values(by='score',ascending=False)
test3=pd.concat([test5,test3]).set_index('variable_name')
display(test3)
all_features = feature_list.copy()
if len(feature_list)>5:
feature_list = list(test3.sort_values(by='score',ascending=False).index[:5].values)
feature_list.append('labels')
lis_ = df[['labels','color']].drop_duplicates()
palette={y['labels']:str(y['color']) for x,y in lis_.iterrows()}
sns.pairplot(df[feature_list], hue="labels", diag_kws={'bw':'1.0'},palette=palette)
#df[(df['POD_CODE'] == POD)][['ACTIVITY_IDENTIFIER','SpellHRG']+features+ ['ReAdmitted in DAYS','POD_CODE','ALL_DIAG','labels']]
#df[(df['Waterlow_Standard'] == Waterlow_Standard)][['ACTIVITY_IDENTIFIER']+features+ ['labels']]
return df[['activity_identifier']+all_features+ ['Waterlow_Standard','Careplan','Label','labels','Expected_LOS']]
def transform(PU_RESULT):
PU_RESULT = PU_RESULT.replace(0,np.NaN)
avg_list = ['WL - Waterlow Score','Mean cell volume', 'Haematocrit', 'Red blood cell count',
'Basophil count', 'White blood cell count', 'Mean cell haemoglobin',
'Neutrophil count', 'Eosinophil count', 'Haemoglobin',
'Lymphocyte count', 'Platelet count', 'Mean cell haemoglobin conc',
'Monocyte count', 'Haemoglobin A1c IFCC', 'C-reactive protein',
'Glucose fasting', 'Glucose random', 'Glucose, CSF',
'Glucose, dialysis fluid', 'Glucose, fluid', 'Albumin']
concate_list = [
'WL - Age',
'WL - Broken Type',
'WL - Build/Weight for Height',
'WL - Continence',
'WL - Gender',
'WL - Lack of Appetite',
'WL - Major Surgery / Trauma',
'WL - Medication',
'WL - Mobility',
'WL - Neurological Deficit',
'WL - Recent Weight Loss',
'WL - Skin Type',
'WL - Tissue Malnutrition',
'WL - Weight Lost',
'PATIENTPOSITION',
'Referral Status Tissue Viability',
'Wound Status',
'Photograph Wound',
'Wound Width',
'Wound Depth',
'Wound Exudate Odour',
'Dressing Type:',
'Wound Surrounding Tissue Colour',
'Wound Cleansing',
'Dressing Assessment:',
'Wound Undermining Location',
'Wound Tunneling Location',
'Wound Odour',
'Already Being Cared for in the Community',
'Wound Exudate Colour',
'Equipment Induced Pressure Ulcer',
'Wound Edge',
'Wound Percent Epithelialised:',
'Equipment Type',
'Wound Dressing Activity',
'Wound Colour',
'Next Dressing Change',
'Wound Length',
'Wound Percent Tissue Eschar',
'Pressure Ulcer Datix Number',
'Pressure Ulcer Datix completed',
'Consent to Photograph',
'Wound Percent Granulated',
'Wound Percent Tissue Slough',
'Wound Type - Wound Assessment',
'Wound Tunneling Depth',
'Wound Exudate Volume',
'Wound Undermining Depth',
'Wound Exudate Type',
'Wound Surrounding Tissue',
'Pressure Ulcer/Blister Category'
]
max_list = ['modelled_events',
'local_patient_identifier',
'all_codes',
'all_hrg',
'HPS_ACTIVITY_DATE_TIME',
'HPS_DISCHARGE_DATE_TIME_HOSPITAL_PROVIDER_SPELL',
'Complex_Degree',
'Global_Central',
'Total_LOS',
'Turnaround_Degree',
'Waterlow_Standard',
'Careplan',
'HPS_ADMISSION_METHOD_CODE_HOSPITAL_PROVIDER_SPELL',
'HPS_AGE_AT_ADMISSION_DATE',
'PERSON_MARITAL_STATUS_CODE_DESC','weight',
'height','Pressure Ulcer Present On Admission',
'Label','DT_ATRISK','ward_move','careplan_ontime','numberof_repositioning','carplan_numberof_repositioning','careplan_compliance_degree']
for item in concate_list:
        # join the group's values into one string (not the characters of str(Series))
        PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(lambda x: ' '.join(x.astype(str)))
PU_RESULT = PU_RESULT.drop_duplicates()
print("Concate Finished")
for item in avg_list:
PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(np.mean)
PU_RESULT = PU_RESULT.drop_duplicates()
print("Avg Finished")
for item in max_list:
try:
PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(np.max)
except:
PU_RESULT[item] = PU_RESULT[item].astype(str)
PU_RESULT[item] = PU_RESULT.groupby(['activity_identifier'])[item].transform(np.max)
PU_RESULT = PU_RESULT.drop_duplicates()
PU_RESULT = PU_RESULT.drop_duplicates()
return PU_RESULT
def check_blood_normal(Reference_Range,input_node_fields,PU_RESULT):
for item in input_node_fields:
print(item)
ref_inuse ={}
for key, value in Reference_Range[item].items():
if key == 'Male':
ref_inuse.update({'Sex is male':value})
elif key == 'Female':
ref_inuse.update({'Sex is female':value})
elif key == 'Unknown':
ref_inuse.update({'Sex is unknown':value})
else:
ref_inuse.update({key:value})
PU_RESULT[item +'_normal'] = PU_RESULT.apply(lambda row: -1 if (pd.isnull(row[item]))else (1 if float(ref_inuse[row['PERSON_GENDER_CODE_DESC']]['min']) <= row[item]<=float(ref_inuse[row['PERSON_GENDER_CODE_DESC']]['max']) else 0),axis=1)
return PU_RESULT
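# Hedged sketch of the data check_blood_normal expects (illustrative reference values, not
# clinical ones): Reference_Range[test][group] -> {'min': ..., 'max': ...}, where the
# 'Male'/'Female'/'Unknown' keys are remapped to the PERSON_GENDER_CODE_DESC wording.
"""
import pandas as pd

Reference_Range = {
    'Albumin': {
        'Male': {'min': 35, 'max': 50},
        'Female': {'min': 35, 'max': 50},
        'Unknown': {'min': 35, 'max': 50},
    }
}
input_node_fields = ['Albumin']
PU_RESULT = pd.DataFrame({
    'PERSON_GENDER_CODE_DESC': ['Sex is male', 'Sex is female'],
    'Albumin': [40.0, None],
})
PU_RESULT = check_blood_normal(Reference_Range, input_node_fields, PU_RESULT)
# adds 'Albumin_normal': 1 (in range), 0 (out of range), -1 (missing result)
"""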
def apply_tag(PU_RESULT):
PU_RESULT['Age_Group'] = PU_RESULT.apply(lambda row: 'Over 60' if row['HPS_AGE_AT_ADMISSION_DATE'] >=60 else 'Under 60',axis=1)
PU_RESULT['Gender_Group'] = PU_RESULT.apply(lambda row: 'Male' if row['PERSON_GENDER_CODE_DESC'] =='Sex is male' else ('Female' if row['PERSON_GENDER_CODE_DESC'] =='Sex is female' else 'Other'),axis=1)
PU_RESULT['Risk_Group'] = PU_RESULT.apply(lambda row: 'PU High Risk' if row['WL - Waterlow Score'] >10 else 'PU Low Risk',axis=1)
PU_RESULT['PU_Group'] = PU_RESULT.apply(lambda row: 'PU Patient' if row['Label'] =='Diagnosed_PU' else 'No PU',axis=1)
PU_RESULT['Surgery_Group'] = PU_RESULT.apply(lambda row: 'Surgical Patient' if row['Surgery'] =='1' else 'No Surgical',axis=1)
PU_RESULT['BMI_Group'] = PU_RESULT.apply(lambda row: 'Unknown BMI - Missing value' if (row['height']==0 or row['weight'] ==0)else ('Obese' if (row['weight']/row['height'])*100 >=30 else ('Under Weight' if (row['weight']/row['height'])*100 <18.5 else ('Healthy' if 18.5<=(row['weight']/row['height'])*100<25 else 'Over Weight' ))),axis=1)
PU_RESULT['Cohort_Group'] = PU_RESULT[['Age_Group', 'Gender_Group', 'Risk_Group','PU_Group','BMI_Group','Surgery_Group']].agg(','.join, axis=1)
PU_RESULT['Waterloo Assessment pass'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] == "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
PU_RESULT['Waterloo Assessment fail'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] != "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
PU_RESULT['Waterloo Assessment on time'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] == "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
PU_RESULT['Waterloo Assessment not on time'] = PU_RESULT.apply(lambda row: 1 if row['Waterlow_Standard'] != "{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}" else 0,axis=1)
PU_RESULT['PU plan on time'] = PU_RESULT.apply(lambda row: 1 if (row['careplan_ontime'] in ([1]) )else 0,axis=1)
PU_RESULT['PU plan not on time'] = PU_RESULT.apply(lambda row: 1 if (row['careplan_ontime'] not in ([1]) )else 0,axis=1)
PU_RESULT['Re-positioning on time'] = PU_RESULT.apply(lambda row: 1 if (row['Careplan'] in (['No careplan', 'No risk',"0,0"]) )else 0,axis=1)
PU_RESULT['Re-positioning not on time'] = PU_RESULT.apply(lambda row: 1 if (row['Careplan'] not in (['No careplan', 'No risk',"0,0"]) )else 0,axis=1)
PU_RESULT['Careplan Compliance'] = PU_RESULT.apply(lambda row: 0 if (float(row['careplan_compliance_degree']) ==0) else (1 if float(row['careplan_compliance_degree'])<0.5 else (2 if 0.5<float(row['careplan_compliance_degree'])<0.8 else 3 )),axis=1)
PU_RESULT['Repositioning Compliance'] = PU_RESULT.apply(lambda row: 0 if (float(row['careplan_compliance_degree']) ==0) else (1 if float(row['careplan_compliance_degree'])<0.5 else (2 if 0.5<float(row['careplan_compliance_degree'])<0.8 else 3 )),axis=1)
Reference_Range,input_node_fields = CR_inputs_reference()
PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
Reference_Range,input_node_fields = Hb1AC_inputs_reference()
PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
Reference_Range,input_node_fields = Albumin_inputs_reference()
PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
Reference_Range,input_node_fields = FBC_inputs_reference()
PU_RESULT = check_blood_normal(Reference_Range,input_node_fields,PU_RESULT)
PU_RESULT=PU_RESULT.fillna(0)
return PU_RESULT
def data_load_clean():
Red004_Conn = Red004()
PU_RESULT = pd.read_sql_query('SELECT * from [AdvancedAnalytics].[dbo].[Variance_Analysis]',Red004_Conn)
HRG_stat = pd.read_sql_query('SELECT [FY] ,[HRG_CODE], [HRG_NAME] ,[ORDINARY_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] ,[NON_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS] FROM [LOCAL_REFERENCE_DB].[ref].[NATIONAL_TARIFF_APC_OPROC] \
where FY = \'2020/2021\'',Red004_Conn)
Red004_Conn.close()
PU_RESULT = PU_RESULT[~PU_RESULT['Label'].str.contains('Empty')]
PU_RESULT=PU_RESULT.fillna(0)
encode_list=[#'Chief Complaint SNOMED Code'
#,'PRESENTING_COMPLAINT'
'modelled_events'
,'all_codes'
,'all_hrg'
,'WARD STAY LOCATION'
,'ETHNIC_CATEGORY_CODE'
,'PERSON_MARITAL_STATUS_CODE'
,'PERSON_GENDER_CODE_DESC'
,'ETHNIC_CATEGORY_CODE_DESC'
,'RELIGIOUS_OR_OTHER_BELIEF_SYSTEM_AFFILIATION'
,'PERSON_MARITAL_STATUS_CODE_DESC'
,'Waterlow_Standard'
,'Careplan'
,'WL - Age'
,'WL - Broken Type'
,'WL - Build/Weight for Height'
,'WL - Continence'
,'WL - Gender'
,'WL - Lack of Appetite'
,'WL - Major Surgery / Trauma'
,'WL - Medication'
,'WL - Mobility'
,'WL - Neurological Deficit'
,'WL - Recent Weight Loss'
,'WL - Skin Type'
,'WL - Tissue Malnutrition'
#,'WL - Waterlow Score'
,'WL - Weight Lost'
,'Wound Status',
'Photograph Wound',
'Wound Width',
'Wound Depth',
'Wound Exudate Odour',
'Dressing Type:',
'Wound Surrounding Tissue Colour',
'Wound Cleansing',
'Dressing Assessment:',
'Wound Undermining Location',
'Wound Tunneling Location',
'Wound Odour',
'Already Being Cared for in the Community',
'Wound Exudate Colour',
'Equipment Induced Pressure Ulcer',
'Wound Edge',
'Wound Percent Epithelialised:',
'Equipment Type',
'Wound Dressing Activity',
'Wound Colour',
'Next Dressing Change',
'Pressure Ulcer Present On Admission',
'Wound Length',
'Wound Percent Tissue Eschar',
'Pressure Ulcer Datix Number',
'Pressure Ulcer Datix completed',
'Consent to Photograph',
'Wound Percent Granulated',
'Wound Percent Tissue Slough',
'Wound Type - Wound Assessment',
'Wound Tunneling Depth',
'Wound Exudate Volume',
'Wound Undermining Depth',
'Wound Exudate Type',
'Wound Surrounding Tissue',
'Pressure Ulcer/Blister Category'
,'Referral Status Tissue Viability'
,'Referral - Tissue Viability','PATIENTPOSITION','Label']
for column in PU_RESULT[PU_RESULT.columns.difference(encode_list)]:
try:
PU_RESULT[column] = PU_RESULT[column].replace(' ', np.NaN).replace(['/'], np.NaN).replace('----',np.NaN).replace('See Lab Comment:',np.NaN).replace('----',np.NaN, regex=True).replace('[a-zA-Z]',np.NaN,regex=True).astype(float)
except Exception as e:
if column == 'C-reactive protein':
PU_RESULT[column] = PU_RESULT[column].replace('<1', 0.5).replace(['/'], np.NaN).replace('<0.2', 0.5).replace('<0.3', 0.5).replace('<0.6', 0.5).replace(' ', np.NaN).replace('[a-zA-Z]',np.NaN,regex=True).astype(float)
elif column =='Glucose, CSF':
PU_RESULT[column] = PU_RESULT[column].replace('<0.1', 0.1).replace('<0.2', 0.5).replace('<0.3', 0.5).replace(' ', np.NaN).replace('[a-zA-Z]',np.NaN,regex=True).astype(float)
            elif str(e) == 'cannot astype a datetimelike from [datetime64[ns]] to [float64]':
                pass
# try:
# PU_RESULT['all_hrg'] = PU_RESULT.apply(lambda row: list(set(row['all_hrg'].split(","))) if row['all_hrg'] != 0 else row['all_hrg'],axis=1)
# PU_RESULT['all_hrg']=PU_RESULT['all_hrg'].apply(str)
# except:
# pass
PU_RESULT=PU_RESULT.fillna(0)
for index,row in PU_RESULT.iterrows():
#print(row['all_hrg'].strip("[']"))
try:
upper_boundary = int(HRG_stat[HRG_stat['HRG_CODE'] == row['all_hrg'].strip("[']")]['NON_ELECTIVE_LONG_STAY_TRIMPOINT_DAYS'])*3600*24
lower_boundary = 2
condition = 'Abnormal'
if 2< row['Total_LOS'] <= upper_boundary:
condition = 'Normal'
PU_RESULT.at[index,'Expected_LOS'] = condition
except:
PU_RESULT.at[index,'Expected_LOS'] = "Unknown"
print(len(PU_RESULT))
PU_RESULT = transform(PU_RESULT)
print("Transform finished.")
print(len(PU_RESULT))
utils_pickle.write(PU_RESULT,"PU_RESULT")
PU_RESULT= apply_tag(PU_RESULT)
utils_pickle.write(PU_RESULT,"PU_RESULT")
column_map = {"Sex":'PERSON_GENDER_CODE_DESC', "Ethcity":'ETHNIC_CATEGORY_CODE_DESC'}
list_dummy_column_map={}
for item in column_map:
dummy_column_map, PU_RESULT = get_dummy_list(column_map, PU_RESULT, item)
list_dummy_column_map.update(dummy_column_map)
#HRG_TLOS_AVG = pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].mean()
#HRG_TLOS_STD = pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].std()
#avg_=pd.DataFrame(pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].mean()).reset_index()
#std_=pd.DataFrame(pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].std()).reset_index()
#count_=pd.DataFrame(pd.DataFrame(PU_RESULT.groupby(['activity_identifier','all_hrg'])['Total_LOS'].mean()).reset_index().groupby(['all_hrg'])['Total_LOS'].count()).reset_index()
#hrg_stat = avg_.merge(std_, on='all_hrg',how='left').merge(count_, on='all_hrg',how='left')
utils_pickle.write(list_dummy_column_map, "PU_RESULT_DUMMY_COLUMNS")
utils_pickle.write(PU_RESULT,"PU_RESULT")
return
def PU_Demo():
Label=['No-diagnose', 'Diagnosed_PU']
Variable_selection = ['WL - Waterlow Score','Complex_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree','Mean cell volume', 'Haematocrit', 'Red blood cell count',
'Basophil count', 'White blood cell count', 'Mean cell haemoglobin',
'Neutrophil count', 'Eosinophil count', 'Haemoglobin',
'Lymphocyte count', 'Platelet count', 'Mean cell haemoglobin conc',
'Monocyte count', 'Haemoglobin A1c IFCC', 'C-reactive protein',
'Glucose fasting', 'Glucose random', 'Glucose, CSF',
'Glucose, dialysis fluid', 'Glucose, fluid', 'Albumin']
Demographic_variable_selection = ['Weight','Sex', 'Age']
Assessment = ['Waterloo Assessment pass', 'Waterloo Assessment fail', 'Waterloo Assessment on time', 'Waterloo Assessment not on time']
Prevention = ['PU plan on time','PU plan not on time', 'Re-positioning on time','Re-positioning not on time']
Patient_Cohort = ['Surgical Patient', 'Medical Patient', 'Ward Outliers','Over 60', 'Over Weight', 'Male', 'Female','PU High Risk', 'PU Patient','NO PU']
Waterlow_Standard = [
'No waterlow',
"{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Pass'}",
"{'rule 1': 'Pass', 'rule 2': 'Pass', 'rule 3': 'Pass', 'rule 4': 'Fail'}",
"{'rule 1': 'Pass', 'rule 2': 'Fail', 'rule 3': 'Fail', 'rule 4': 'Fail'}",
"{'rule 1': 'Pass', 'rule 2': 'Fail', 'rule 3': 'Fail', 'rule 4': 'Pass'}"]
Waterlow_Compliance = ['Rule 1: Use Waterlow', 'Rule 2: 4 Hours Admission', 'Rule 3: AE 4hours', 'Rule 4: Ward Transfer 4hours','No Waterlow']
LOS = ["Expected Long for HRG", "Unexpected Long for HRG","Unexpected short - live discharge","Unknown"]
events_list =['Waterlow','LabTest','Skinasses','TV','PatientPosition']
stats = interact(plot_Spell_PU_Degree_PairCompare,
Label=widgets.SelectMultiple(
options=
Label,
value= Label,
#rows=10,
description='Pressure Ulcer',
disabled=False
),Expected_LOS=widgets.SelectMultiple(
options=
LOS,
value= LOS,
#rows=10,
description='Expected LOS',
disabled=False
),selected_list=widgets.SelectMultiple(
options=
events_list,
value= ['Waterlow','LabTest'],
#rows=10,
description='Events',
disabled=False
)
,Age=widgets.IntRangeSlider(value=[0,120],
min=0,
max=120,
step=1,
description='Age:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Waterlow_Standard=widgets.SelectMultiple(
options=
Waterlow_Compliance,
value= ['Rule 1: Use Waterlow'] ,
#rows=10,
description='WaterlowStandard',
disabled=False)
,features=widgets.SelectMultiple(
options=
Variable_selection,
value= ['Complex_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree','WL - Waterlow Score'] ,
#rows=10,
description='Variables',
disabled=False
)
)
return stats
def sub_timeline_layout(tt, labels=None):
node = {}
for node_index, node_feature in tt.nodes(data=True):
if node_feature['name'] == 'Spell_Start':
node.update({node_index:node_feature['activity_start_time']})
elif node_feature['name'] == 'Spell_End':
node.update({node_index:node_feature['activity_end_time']})
else:
node.update({node_index:node_feature['activity_start_time']})
# for node_index, node_feature in tt.nodes(data=True):
# node.update({node_index:node_feature['activity_start_time']})
sorted_dic = sorted(node.items(), key=lambda kv: kv[1])
pos = {}
i=0
x = 0
y = 0
list_=[]
for i in range(len(sorted_dic)):
if i >0:
diff = datetime.strptime(sorted_dic[i][1],'%Y.%m.%d %H:%M:%S')-datetime.strptime(sorted_dic[i-1][1],'%Y.%m.%d %H:%M:%S')
            # total_seconds() keeps the spacing correct for gaps longer than a day
            x = x + diff.total_seconds()/18
pos.update({sorted_dic[i][0]:np.array([x,y])})
else:
pos.update({sorted_dic[0][0]:np.array([0,0])})
if labels is not None:
            result = ''.join([ch for ch in labels[sorted_dic[i][0]] if not ch.isdigit()])
if result == '._start':
continue
elif result == '._end':
continue
else:
list_.append(result)
unique_events = set(list_)
pos_y = 20
for item in unique_events:
for i in range(len(sorted_dic)):
event_match = re.match( r'{}'.format(item), labels[sorted_dic[i][0]], re.M|re.I)
if event_match:
x= pos[sorted_dic[i][0]][0]
y = pos_y
pos.update({sorted_dic[i][0]:np.array([x,y])})
pos_y = pos_y + 30
return pos
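# Hedged sketch of the node schema sub_timeline_layout assumes (hypothetical graph): every node
# needs 'name' and 'activity_start_time' ('Spell_End' also uses 'activity_end_time'), with
# timestamps formatted '%Y.%m.%d %H:%M:%S'.
"""
import networkx as nx

tt = nx.DiGraph()
tt.add_node(0, name='Spell_Start', activity_start_time='2021.01.01 10:00:00')
tt.add_node(1, name='Spell_End', activity_start_time='2021.01.01 12:00:00',
            activity_end_time='2021.01.01 12:00:00')
tt.add_node(2, name='Waterlow', activity_start_time='2021.01.01 10:30:00')
labels = {0: 'Spell_Start0', 1: 'Spell_End1', 2: 'Waterlow2'}
pos = sub_timeline_layout(tt, labels)  # {node_index: np.array([x, y]), ...}
"""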
def PU_Path_Vis_Demo_fromRecords(item = 1234567):
# hv.extension('bokeh')
# defaults = dict(width=1000, height=1000, padding=0.1)
# Load individual visualization requires patientKG Class
graph = patientKG.utils_pickle.read("GraphCalculationResults/Ward_Stay/KG_{}".format(item))
# hv.opts.defaults(opts.EdgePaths(**defaults), opts.Graph(**defaults), opts.Nodes(**defaults))
label = dict((int(v),k) for k,v in graph.node_dic.items())
combined = graph.graph
# for index, item in combined.nodes(data=True):
# print(item['color'])
# combined._node[6]["WARD STAY LOCATION"]=""
# combined._node[7]["WARD STAY LOCATION"]=""
# combined._node[8]["WARD STAY LOCATION"]=""
# combined._node[9]["WARD STAY LOCATION"]=""
attr={}
for index, node in combined.nodes(data=True):
if index==0 or index == 1:
attr.update({index:{'abv': node['name']}})
else:
attr.update({index:{'abv':"".join(e[0] for e in node['name'].split())}})
nx.set_node_attributes(combined, attr)
    # 'defaults' must exist before the opts are applied (values taken from the commented-out block above)
    defaults = dict(width=1000, height=1000, padding=0.1)
    hv.opts.defaults(
        opts.EdgePaths(**defaults), opts.Graph(**defaults), opts.Nodes(**defaults))
pos = graph.timeline_layout(label)
optss = dict(node_size='size', edge_line_width=0.5 ,node_color='color', cmap=['#30a2da','yellow','red','green','black'])
simple_graph=hv.Graph.from_networkx(combined, pos).options(**optss)
labels = hv.Labels(simple_graph.nodes, ['x', 'y'], 'abv')
# print(simple_graph.nodes)
# print(graph.graph.degree)
#bokeh_server = pn.Row(simple_graph* labels.opts(text_font_size='16pt', text_color='white', bgcolor='gray')).show(port=12345)
return pn.Row(simple_graph* labels.opts(text_font_size='16pt', text_color='white', bgcolor='gray'))
# days_nodes = {}
# for i in range(0, 16, 1):
# nodes_list = [0,1]
# for k,v in graph.graph.nodes(data=True):
# if k > 1:
# diff = (datetime.strptime(v['activity_start_time'],'%Y.%m.%d %H:%M:%S') -datetime.strptime(graph.graph.nodes[0]['activity_start_time'],'%Y.%m.%d %H:%M:%S')).total_seconds()
# if diff <= i*3600*24:
# nodes_list.append(k)
# days_nodes.update({i:nodes_list})
# debug_ = {i: hv.Graph.from_networkx(graph.graph.subgraph(days_nodes[i]), sub_timeline_layout(graph.graph.subgraph(days_nodes[i]),dict((int(v),k) for k,v in graph.node_dic.items())), iterations=i, seed=10) for i in range(0, 16, 1)}
# return hv.HoloMap({i: hv.Graph.from_networkx(graph.graph.subgraph(days_nodes[i]), sub_timeline_layout(graph.graph.subgraph(days_nodes[i]),dict((int(v),k) for k,v in graph.node_dic.items())), iterations=i, seed=10) for i in range(0, 16, 1)},
# kdims='Iterations')
def PU_Path_Vis_Demo_live(item = 1234567):
    #While the modelling process works, Jupyter running as a Windows service logs in as 'RBBH_MSDOMAIN1\\RBHDBSRED008$', which currently has no access to the database.
    #This causes the live query to fail.
return test_compose(str(item))
def PU_Path_DEMO():
try:
print("\
WA: Waterlow Assessment \n \
Node in Red: Not implemented. \n \
Cplb: C-reactive protein level, blood\n \
Node in Red: test result is out of normal range;\n \
PP: Patient Position\n \
Node in red: breach 6 hours repositioning requirement\n \
Node in yellow: breach 4 hours repositioning requirement\n \
WS: Ward Stay\n \
Node in Red: Waterlow assessment not performed within 4 hours after ward transfer\n \
Fbcb: Full blood count, blood\n \
Node in Red: test result is out of normal range;")
stats = interact(PU_Path_Vis_Demo_fromRecords,
item=widgets.Text(value='1234567',
placeholder='Type in Spell Number',
description='Spell:',
disabled=False))
return stats
except:
return "No Such Spell!"
def generate_cohort_pattern(Patient_Cohort):
pattern=''
    Union_criteria = [('Male', 'Female'),('PU Patient','No PU'),('No Surgical','Surgical Patient'),('PU Low Risk', 'PU High Risk')]  # 'No PU' matches the wording written by apply_tag
union_BMI = ['Healthy','Under Weight','Over Weight','Obese','Unknown BMI - Missing value']
tt=[]
bmi =[]
for item in Patient_Cohort:
check = [(x,y) for x, y in Union_criteria if (x == item or y ==item)]
if len(check)<1 and item not in union_BMI:
y = '(?=.*{})'.format(item)
pattern+=y
elif item in union_BMI:
bmi.append(item)
else:
tt.append(check)
ttt= [[g[0], len(list(g[1]))] for g in itertools.groupby(tt)]
for item in ttt:
if item[1] > 1:
pattern+='((?=.*{})|(?=.*{}))'.format(item[0][0][0],item[0][0][1])
elif item[1] == 1:
for check_item in Patient_Cohort:
check = [(x,y) for x, y in Union_criteria if (x == check_item or y ==check_item)]
if len(check)==1 and check == item[0]:
y = '(?=.*{})'.format(check_item)
pattern+=y
union_pattern=''
while bmi:
y = '(?=.*{})|'.format(bmi[0])
union_pattern+=y
bmi.pop(0)
if len(union_pattern)>0:
union_pattern= "("+union_pattern[:-1]+")"
pattern+=union_pattern
return pattern
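# Worked example (hypothetical cohort selection, not from the notebook): criteria from a paired
# set become an OR only when both members are selected, BMI groups are always OR-ed, and
# everything else becomes a mandatory look-ahead, so the pattern below matches any Cohort_Group
# containing 'Over 60', 'PU Patient' and either BMI group.
"""
pattern = generate_cohort_pattern(['Over 60', 'PU Patient', 'Obese', 'Over Weight'])
# pattern == '(?=.*Over 60)(?=.*PU Patient)((?=.*Obese)|(?=.*Over Weight))'
"""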
def get_dummy_list(column_map, df, column):
column_mapped = [column_map[column]]
df[column] = df[column_mapped]
dum_df = pd.get_dummies(df, columns=column_mapped, prefix=["Type_is"] )
column_diff = list(set(dum_df.columns) - set(df.columns))
dummy_column_map = {column:column_diff}
return dummy_column_map, dum_df
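# Hedged usage sketch (hypothetical frame): get_dummy_list one-hot encodes the mapped source
# column with a 'Type_is' prefix and reports which dummy columns were created (a set difference,
# so the order of the reported columns is not guaranteed).
"""
import pandas as pd

column_map = {"Sex": 'PERSON_GENDER_CODE_DESC'}
df = pd.DataFrame({'PERSON_GENDER_CODE_DESC': ['Sex is male', 'Sex is female']})
dummy_column_map, dum_df = get_dummy_list(column_map, df, 'Sex')
# dummy_column_map -> {'Sex': ['Type_is_Sex is male', 'Type_is_Sex is female']} (order may vary)
"""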
def predefined_cohort(Patient_Cohort):
df = pd.DataFrame()
PU_RESULT = patientKG.utils_pickle.read("PU_RESULT")
pattern = generate_cohort_pattern(Patient_Cohort)
PU_RESULT_COHORT =PU_RESULT.loc[(PU_RESULT['Cohort_Group'].str.contains(pattern))]
return PU_RESULT_COHORT
TEMPLATE = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>PivotTable.js</title>
<!-- external libs from cdnjs -->
<link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.11/c3.min.css">
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.11/c3.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery-csv/0.71/jquery.csv-0.71.min.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/pivot.min.css">
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/pivot.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/d3_renderers.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/c3_renderers.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pivottable/2.19.0/export_renderers.min.js"></script>
<style>
body {font-family: Verdana;}
.node {
border: solid 1px white;
font: 10px sans-serif;
line-height: 12px;
overflow: hidden;
position: absolute;
text-indent: 2px;
}
.c3-line, .c3-focused {stroke-width: 3px !important;}
.c3-bar {stroke: white !important; stroke-width: 1;}
.c3 text { font-size: 12px; color: grey;}
.tick line {stroke: white;}
.c3-axis path {stroke: grey;}
.c3-circle { opacity: 1 !important; }
.c3-xgrid-focus {visibility: hidden !important;}
</style>
</head>
<body>
<script type="text/javascript">
$(function(){
$("#output").pivotUI(
$.csv.toArrays($("#output").text())
, $.extend({
renderers: $.extend(
$.pivotUtilities.renderers,
$.pivotUtilities.c3_renderers,
$.pivotUtilities.d3_renderers,
$.pivotUtilities.export_renderers
),
hiddenAttributes: [""]
}
, {
onRefresh: function(config) {
var config_copy = JSON.parse(JSON.stringify(config));
//delete some values which are functions
delete config_copy["aggregators"];
delete config_copy["renderers"];
//delete some bulky default values
delete config_copy["rendererOptions"];
delete config_copy["localeStrings"];
$("#output2").text(JSON.stringify(config_copy, undefined, 2));
}
}
, %(kwargs)s
, %(json_kwargs)s)
).show();
});
</script>
<div id="output" style="display: none;">%(csv)s</div>
<textarea id="output2"
style="float: left; width: 0px; height: 0px; margin: 0px; opacity:0;" readonly>
</textarea>
<button onclick="copyTextFunction()">Copy settings</button>
<script>
function copyTextFunction() {
var copyText = document.getElementById("output2");
copyText.select();
document.execCommand("copy");
}
</script>
</body>
</html>
"""
def pivot_cht_ui(df, name="test", url="",
width="100%", height="500",json_kwargs='', **kwargs):
#print(name)
outfile_path = name + '.html'
with io.open(outfile_path, 'wt', encoding='utf8') as outfile:
csv = df.to_csv(encoding='utf8')
if hasattr(csv, 'decode'):
csv = csv.decode('utf8')
outfile.write(TEMPLATE %
dict(csv=csv, kwargs=json.dumps(kwargs),json_kwargs=json_kwargs))
return IFrame(src=url or outfile_path, width=width, height=height)
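# Hedged usage sketch (hypothetical frame and settings): writes '<name>.html' next to the
# notebook and returns an IFrame embedding the PivotTable.js UI; extra keyword arguments are
# forwarded to pivotUI as JSON, and json_kwargs should be valid JSON text.
"""
import pandas as pd

example_df = pd.DataFrame({'labels': [0, 1], 'Total_LOS': [10, 30]})
pivot_cht_ui(example_df, name="pu_pivot", height="600", json_kwargs='{}', rows=["labels"])
"""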
def get_pvalue(df,feature_list,categorical_feature_list):
rows_list = []
outcome_number = len(df['Label'].unique())
for item in df[feature_list].columns:
if item not in categorical_feature_list:
dia_list = []
undiag_list = []
for label in df['Label'].unique():
if label == 'Diagnosed_PU':
dia_list.append(df[df['Label']==label][item].values)
else:
undiag_list.append(df[df['Label']==label][item].values)
dd=[]
ddd = []
for da_item in list(dia_list[0]):
dd.append(da_item)
for und_item in list(undiag_list[0]):
ddd.append(und_item)
fvalue, pvalue = stats.f_oneway(*[dd,ddd])
rows_list.append((item,pvalue))
else:
dict1 = {}
contigency= pd.crosstab(df[item], df['Label'])
c, p, dof, expected = chi2_contingency(contigency)
dict1.update({item:p})
rows_list.append((item,p))
return rows_list
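# Hedged usage sketch (hypothetical values): continuous features are tested with a one-way ANOVA
# between the 'Diagnosed_PU' rows and the rest, categorical features with a chi-square test on
# the crosstab; the result is a list of (feature, p-value) tuples.
"""
import pandas as pd

example_df = pd.DataFrame({
    'Label': ['Diagnosed_PU', 'Diagnosed_PU', 'No-diagnose', 'No-diagnose'],
    'Albumin': [30.0, 32.0, 41.0, 43.0],
    'Gender_Group': ['Male', 'Female', 'Male', 'Female'],
})
pvals = get_pvalue(example_df, ['Albumin', 'Gender_Group'], ['Gender_Group'])
"""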
#def plot_func(df, Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management):
def plot_Spell_PU_Degree_PairCompare_v2(Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management):
#Corhot Selection
#print(list(Patient_Cohort))
if list(Patient_Cohort) == ['All']:
df= patientKG.utils_pickle.read("PU_RESULT")
#plot_func(df, Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management)
else:
Patient_Cohort = list(Patient_Cohort)
try:
df=predefined_cohort(Patient_Cohort)
if df.empty:
return "No Sample!"
#else:
#plot_func(df, Outcome, Patient_Cohort, DateRange,Demographic, Assessment, Prevention, Blood_Results, Blood_Normalty,Management)
# df = pd.concat([df,data])
        except:
            print("No Sample!")
            return "No Sample!"
def pop_std(x):
return x.std(ddof=0)
df['date'] = pd.to_datetime(df['HPS_ACTIVITY_DATE_TIME'])
mask = (df['date'] > DateRange[0]) & (df['date'] <= DateRange[1])
df = df.loc[mask]
features = []
df= df.fillna(0)
Demographic_map = {"Weight":"weight",
"Sex":['Type_is_Sex is female', 'Type_is_Sex is male','Type_is_Sex is unknown', 'Type_is_Unspecified']
,"Age":'HPS_AGE_AT_ADMISSION_DATE'
,"Ethcity":'ETHNIC_CATEGORY_CODE_DESC'}
Assessment_map = {"Waterlow Assessment Outcomes":"Waterloo Assessment pass", "Waterloo Assessment fail":"Waterloo Assessment fail", "Waterlow Assessment timeliness":"Waterloo Assessment on time","Waterloo Assessment not on time":"Waterloo Assessment not on time" }
Prevention_map = {'PU plan initia timeliness':'PU plan on time','PU plan not on time':'PU plan not on time', 'Re-positioning timeliness':'Re-positioning on time','Re-positioning not on time':'Re-positioning not on time','Re-positioning Compliance':'Repositioning Compliance'}
Management_map={'Ward Move':'ward_move'}
One_hot_encoding_map= utils_pickle.read("PU_RESULT_DUMMY_COLUMNS")
for item in Demographic:
if item not in One_hot_encoding_map.keys():
features.append(Demographic_map[item])
else:
features = features +list(One_hot_encoding_map[item])
for item in Assessment:
if item not in One_hot_encoding_map.keys():
features.append(Assessment_map[item])
else:
features = features +list(One_hot_encoding_map[item])
for item in Prevention:
if item not in One_hot_encoding_map.keys():
features.append(Prevention_map[item])
else:
features = features +list(One_hot_encoding_map[item])
for item in Blood_Results:
if item not in One_hot_encoding_map.keys():
features.append(item)
else:
features = features +list(One_hot_encoding_map[item])
for item in Blood_Normalty:
if item not in One_hot_encoding_map.keys():
features.append(item+'_normal')
else:
features = features +list(One_hot_encoding_map[item])
for item in Management:
if item not in One_hot_encoding_map.keys():
features.append(Management_map[item])
else:
features = features +list(One_hot_encoding_map[item])
#features = ['Sum_Degree','Global_Central', 'Total_LOS', 'Turnaround_Degree']
try:
principalComponents,pca_explained,pca_components = sliced_principle_components(df,features,2)
        principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
import pandas as pd
import numpy as np
from pyshop.shopcore.shop_api import get_attribute_value, get_time_resolution, set_attribute
class ShopApiMock:
mock_dict = {
'GetIntValue': 11,
'GetIntArray': [11, 22],
'GetDoubleValue': 1.1,
'GetDoubleArray': [1.1, 2.2],
'GetStringValue': 'abc',
'GetStringArray': ['abc', 'def'],
'GetXyCurveX': [0, 1],
'GetXyCurveY': [0.0, 1.1],
'GetSyCurveS': ['s1', 's2'],
'GetSyCurveY': [0.0, 1.1],
'GetXyCurveReference': 0.0,
'GetXyCurveArrayReferences': [0.0, 10.0],
'GetXyCurveArrayNPoints': [2, 3],
'GetXyCurveArrayX': [0, 1, 0, 1, 2],
'GetXyCurveArrayY': [0.0, 1.1, 0.0, 1.1, 2.2],
'GetTimeUnit': 'minute',
'GetTxySeriesStartTime': '202201010000',
'GetTxySeriesT': [0, 15, 30, 45, 60, 120],
'GetTxySeriesY': [0.0, 1.1, 2.2, 3.3, 4.4, 5.5],
'GetTimeZone': '',
'GetStartTime': '202201010000',
'GetEndTime': '202201010300',
'GetTimeResolutionT': [0, 60],
'GetTimeResolutionY': [15, 60]
}
def __getattr__(self, command: str):
def dummy_func(*args):
if command.startswith('Get'):
return self.mock_dict[command]
elif command.startswith('Set'):
self.mock_dict[command] = args
return dummy_func
def __getitem__(self, command):
return self.mock_dict[command]
class TestGetAttribute:
shop_api = ShopApiMock()
def test_get_int(self):
assert get_attribute_value(self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'int') == self.shop_api['GetIntValue']
def test_get_int_array(self):
assert(
get_attribute_value(
self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'int_array'
) == self.shop_api['GetIntArray']
)
def test_get_double(self):
assert(
get_attribute_value(
self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'double'
) == self.shop_api['GetDoubleValue']
)
def test_get_double_array(self):
assert(
get_attribute_value(
self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'double_array'
) == self.shop_api['GetDoubleArray']
)
def test_get_string(self):
assert(
get_attribute_value(
self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'string'
) == self.shop_api['GetStringValue']
)
def test_get_string_array(self):
assert(
get_attribute_value(
self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'string_array'
) == self.shop_api['GetStringArray']
)
def test_get_xy(self):
value = get_attribute_value(self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'xy')
assert (value.index == self.shop_api['GetXyCurveX']).all()
assert (value.values == self.shop_api['GetXyCurveY']).all()
assert value.name == self.shop_api['GetXyCurveReference']
def test_get_sy(self):
value = get_attribute_value(self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'sy')
assert (value.index == self.shop_api['GetSyCurveS']).all()
assert (value.values == self.shop_api['GetSyCurveY']).all()
def test_get_xy_array(self):
value = get_attribute_value(self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'xy_array')
for i, n in enumerate(self.shop_api['GetXyCurveArrayNPoints']):
n_sum = sum(self.shop_api['GetXyCurveArrayNPoints'][0:i])
assert (value[i].index == self.shop_api['GetXyCurveArrayX'][n_sum:n_sum + n]).all()
assert (value[i].values == self.shop_api['GetXyCurveArrayY'][n_sum:n_sum + n]).all()
assert value[i].name == self.shop_api['GetXyCurveArrayReferences'][i]
def test_get_txy(self):
value = get_attribute_value(self.shop_api, 'obj_name', 'obj_type', 'attr_name', 'txy')
if self.shop_api['GetTimeUnit'] == 'hour':
            starttime = pd.Timestamp(self.shop_api['GetTxySeriesStartTime'])
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney (东方财富网) quotes homepage - Shanghai, Shenzhen and Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai, Shenzhen and Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
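# Hedged usage sketch (requires network access to the Eastmoney endpoint; the returned columns
# are the Chinese names defined above):
"""
stock_zh_a_spot_em_df = stock_zh_a_spot_em()
print(stock_zh_a_spot_em_df.head())
"""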
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    Eastmoney - stock symbols and their market codes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping of stock symbol to market code
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
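# Hedged sketch (values depend on the live response): the returned dict maps a bare 6-digit
# symbol to its Eastmoney market id (1 for Shanghai, 0 for Shenzhen/Beijing), which the history
# functions below use to build the 'secid' request parameter.
"""
code_id_dict = code_id_map_em()
code_id_dict.get("600000")  # -> 1 (Shanghai listing, hypothetical)
code_id_dict.get("000001")  # -> 0 (Shenzhen listing, hypothetical)
"""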
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes homepage - Shanghai, Shenzhen and Beijing A shares - daily quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock symbol
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "forward adjusted", "hfq": "backward adjusted", "": "not adjusted"}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
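# Usage sketch (illustrative; requires network access, and the date strings follow the
# YYYYMMDD format passed straight into the "beg"/"end" parameters above):
# hist_df = stock_zh_a_hist(symbol="000001", period="daily",
#                           start_date="20210101", end_date="20211231", adjust="qfq")
# print(hist_df.head())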
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
东方财富网-行情首页-沪深京 A 股-每日分时行情
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 股票代码
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:param period: choice of {'1', '5', '15', '30', '60'}
:type period: str
:param adjust: choice of {'', 'qfq', 'hfq'}
:type adjust: str
:return: 每日分时行情
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
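# Usage sketch (illustrative; any period other than "1" goes through the kline branch
# above, so start_date/end_date are only used to slice the returned frame):
# min_df = stock_zh_a_hist_min_em(symbol="000001", period="5",
#                                 start_date="2021-09-01 09:30:00",
#                                 end_date="2021-09-10 15:00:00", adjust="")
# print(min_df.head())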
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
东方财富网-行情首页-沪深京 A 股-每日分时行情包含盘前数据
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: 股票代码
:type symbol: str
:param start_time: 开始时间
:type start_time: str
:param end_time: 结束时间
:type end_time: str
:return: 每日分时行情包含盘前数据
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
    temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
        i1 = Period('1Q2005')
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
needs_i8_conversion,
)
import pandas as pd
from pandas import NumericIndex
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_unique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.unique()
# dict.fromkeys preserves the order
unique_values = list(dict.fromkeys(obj.values))
if isinstance(obj, pd.MultiIndex):
expected = pd.MultiIndex.from_tuples(unique_values)
expected.names = obj.names
tm.assert_index_equal(result, expected, exact=True)
elif isinstance(obj, pd.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(unique_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(unique_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_unique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
    if not allow_na_ops(obj):
# -*- coding: utf-8 -*-
"""
Created on Fri May 21 14:50:55 2021
@author: Oswin
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import itertools
from sklearn.metrics import accuracy_score, recall_score, precision_score
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
# from sklearn.base import BaseEstimator, TransformerMixin
from emissions.data import load_data, clean_data, split
from emissions.transformer import MakeTransformer
def scoring_table(search, param_index,cols):
"""
takes grid search output and index of best params
returns a scoring table
"""
result = search.cv_results_
tmp = pd.DataFrame({'train':{'accuracy': result['mean_train_accuracy'][param_index],
'recall': result['mean_train_recall'][param_index],
'precision': result['mean_train_precision'][param_index]},
'val':{'accuracy': result['mean_test_accuracy'][param_index],
'recall': result['mean_test_recall'][param_index],
'precision': result['mean_test_precision'][param_index]}
})
y_pred = search.best_estimator_.predict(X_test[cols])
y_true = y_test
tmp.loc['accuracy', 'test'] = accuracy_score(y_true, y_pred)
tmp.loc['recall', 'test'] = recall_score(y_true, y_pred)
tmp.loc['precision', 'test'] = precision_score(y_true, y_pred)
return tmp.round(3)
def plot_learning_curve(model, X_train, y_train, name='test', scoring='recall'):
"""takes a model, X_train, y_train and plots learning curve"""
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
train_sizes, train_scores, test_scores = learning_curve(model.best_estimator_,
X_train,
y_train,
train_sizes=np.linspace(0.05, 1, 20),
cv=cv,
scoring=scoring,
n_jobs=-1
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_scores_mean, label = 'Train')
plt.fill_between(train_sizes,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1)
plt.plot(train_sizes, test_scores_mean, label = 'Val')
plt.fill_between(train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1)
plt.legend()
plt.ylabel('score')
plt.xlabel('train sizes')
if scoring=='recall':
plt.ylim(0.6, 1)
plt.savefig('../tree_figs/' + name + '.png', bbox_inches='tight')
plt.close()
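# Usage sketch (assumes a fitted GridSearchCV object such as the `search` built below;
# the output file name is derived from the chosen name argument in the same way):
# plot_learning_curve(search, X_train[cols], y_train, name='DT_demo', scoring='recall')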
## Get and clean data
# first of all: get the data
df = load_data()
df = clean_data(df)
# second of all: split the data
X_train, X_test, y_train, y_test = split(df)
# interesting columns: THIS DOES NOT WORK WITH MAKE FOR NOW
col_names = ['MODEL_YEAR','VEHICLE_AGE','MILE_YEAR','GVWR','ENGINE_SIZE',
'TRANS_TYPE','TEST_TYPE','ENGINE_WEIGHT_RATIO','VEHICLE_TYPE']
# ,'BEFORE_2000','SPORT','MAKE_VEHICLE_TYPE','MAKE'
m = 20
## full feature tree
cols = col_names
cat_cols = []
if 'TRANS_TYPE' in cols:
cat_cols.extend(['TRANS_TYPE'])
if 'TEST_TYPE' in cols:
cat_cols.extend(['TEST_TYPE'])
if 'MAKE_VEHICLE_TYPE' in cols:
cat_cols.extend(['MAKE_VEHICLE_TYPE'])
if ('MAKE' in cols):
# transform make
make_processor = Pipeline([
('make_transformer', MakeTransformer()),
('encoder', OneHotEncoder(handle_unknown='ignore'))
])
# Preprocessor
preprocessor = ColumnTransformer([
('make_processor', make_processor, ['MAKE']),
('encoder', OneHotEncoder(handle_unknown='ignore'), cat_cols)],
remainder='passthrough'
)
# Combine preprocessor and linear model in pipeline
pipe = Pipeline([
('preprocessing', preprocessor),
('model', DecisionTreeClassifier(class_weight='balanced'))
])
elif cat_cols != []:
# Preprocessor
preprocessor = ColumnTransformer([
('encoder', OneHotEncoder(handle_unknown='ignore'), cat_cols)],
remainder='passthrough'
)
# Combine preprocessor and linear model in pipeline
pipe = Pipeline([
('preprocessing', preprocessor),
('model', DecisionTreeClassifier(class_weight='balanced'))
])
else:
pipe = Pipeline([
('model', DecisionTreeClassifier(class_weight='balanced'))
])
# Hyperparameter Grid
grid = {'model__max_depth': np.arange(2, m, 1)}
# Instanciate Grid Search
search = GridSearchCV(pipe,
grid,
scoring=['accuracy', 'recall', 'precision'],
cv=10,
refit='recall',
return_train_score=True,
n_jobs=18
)
search.fit(X_train[cols], y_train)
result = search.cv_results_
pd.DataFrame(result)[['param_model__max_depth',
'mean_test_recall',
'mean_train_recall']].sort_values('mean_test_recall', ascending=False).head(5)
tmp = scoring_table(search, search.best_index_, cols)
name = 'DT_all_' + str(search.best_params_['model__max_depth'])
plot_learning_curve(search, X_train[cols], y_train, name=name)
df_res = pd.DataFrame()
df_all = pd.DataFrame()
import os
import random
import pyperclip
import string
from datetime import datetime
import pandas as pd
def generator():
length = 16
password = []
punctuation = "-+?_!&"
password.append(random.choice(string.ascii_lowercase))
password.append(random.choice(string.ascii_uppercase))
password.append(random.choice(string.digits))
password.append(random.choice(punctuation))
chars = list((string.ascii_letters + string.digits + punctuation) * 10)
random.shuffle(chars)
length = length - 5
password.append(
"".join(
random.sample(
chars,
length,
)
)
)
    # shuffle the collected characters so the fixed-order picks above are not predictable
    shuffled_chars = [i for c in password for i in c]
    random.shuffle(shuffled_chars)
    password = random.choice(string.ascii_letters) + "".join(shuffled_chars)
return password
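# Usage sketch (pyperclip is imported above; clipboard access depends on the local OS):
# pwd = generator()
# print(pwd)
# pyperclip.copy(pwd)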
def writer(data):
df = pd.DataFrame(data=data, index=[0])
if not os.path.exists("data.csv"):
df.to_csv("data.csv", index=False)
else:
df.to_csv("data.csv", mode="a", header=False, index=False)
def reader():
keyword = input("Search keyword: ")
if not os.path.exists("data.csv"):
return None
else:
        df = pd.read_csv("data.csv")
__author__ = "<NAME>"
__license__ = "GPL"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>"]
__maintainer__ = "Md. <NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
# Importing libraries
import os
import glob
import pandas as pd
import numpy as np
from datetime import datetime
import aggregator as aggregator
'''# Scanning all the file names
os.chdir("research/irp-logs-mining/Dataset/benign-irp-logs/machine_7")
all_filenames = [i for i in glob.glob('*')]
all_filenames = sorted(all_filenames)'''
# NPSY flags
def generateStringForIRPFlags(x):
val = "0:0:0:0"
if(len(x) != 4):
return val
else:
if(x[0] != '-'): # N flag
val = '1:'
else:
val = '0:'
if(x[1] != '-'): # P flag
val += '1:'
else:
val += '0:'
if(x[2] != '-'): # S flag
val += '1:'
else:
val += '0:'
if(x[3] != '-'): # Y flag
val += '1'
else:
val += '0'
return val
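# Example (illustrative): each present NPSY flag maps to 1, '-' maps to 0, and any
# string that is not exactly 4 characters falls back to "0:0:0:0".
# generateStringForIRPFlags("N-S-")  # -> "1:0:1:0"
# generateStringForIRPFlags("----")  # -> "0:0:0:0"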
def main(raw_dataset):
# The following are the column names to be used for the processed dataset
column_names = [
"operation_irp", "operation_fsf", "operation_fio", #categorical / string -> int as flags
"sequence_number", #hex / string -> int
"pre_operation_time", "post_operation_time", #timestamp -> float
"operation_elapsed", #float
"process_id", "thread_id", "parent_id", #numerical
"process_name", #string
"major_operation_type", "minor_operation_type", #categorical / string
"irp_flag", #hex / string -> int
"irp_nocache", "irp_paging_io", "irp_synchoronous_api", "irp_synchoronous_paging_io", #flag values
"device_object", "file_object", "transaction", "status", "inform", #hex / string -> int
"arg1", "arg2", "arg3", "arg4", "arg5", "arg6", # hex / string -> int
"buffer_length", "entropy", #numerical
"file_name", #string
"family_id", #numerical / multiclass
"class" #binary
]
raw_dataset = raw_dataset.drop(raw_dataset.index[0]) # Removing the first row
raw_dataset.columns = raw_dataset.columns.str.strip()
# Operation Type
raw_dataset['Opr'] = raw_dataset['Opr'].str.strip()
raw_dataset = raw_dataset.drop(raw_dataset[(raw_dataset['Opr'] != 'IRP') & (raw_dataset['Opr'] != 'FSF') & (raw_dataset['Opr'] != 'FIO')].index)
# OneHotEncoding and then concat for operation
one_hot = pd.get_dummies(raw_dataset.Opr, prefix='operation')
raw_dataset = raw_dataset.drop('Opr', axis=1)
raw_dataset = one_hot.join(raw_dataset)
del one_hot
# Sequence Number
raw_dataset['SeqNum'] = raw_dataset['SeqNum'].str.strip()
# Pre Operation Time
raw_dataset['PreOp Time'] = raw_dataset['PreOp Time'].str.strip()
raw_dataset['PreOp Time'] = [datetime.strptime(i, "%H:%M:%S:%f").strftime("%H:%M:%S.%f") for i in raw_dataset['PreOp Time']]
    raw_dataset['PreOp Time'] = pd.to_timedelta(raw_dataset['PreOp Time'])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from pandas import Timestamp
##### DATA #####
data = {'Task': {0: 'TSK M',
1: 'TSK N',
2: 'TSK L',
3: 'TSK K',
4: 'TSK J',
5: 'TSK H',
6: 'TSK I',
7: 'TSK G',
8: 'TSK F',
9: 'TSK E',
10: 'TSK D',
11: 'TSK C',
12: 'TSK B',
13: 'TSK A'},
'Department': {0: 'IT',
1: 'MKT',
2: 'ENG',
3: 'PROD',
4: 'PROD',
5: 'FIN',
6: 'MKT',
7: 'FIN',
8: 'MKT',
9: 'ENG',
10: 'FIN',
11: 'IT',
12: 'MKT',
13: 'MKT'},
'Start': {0: Timestamp('2022-03-17 00:00:00'),
1: Timestamp('2022-03-17 00:00:00'),
2: Timestamp('2022-03-10 00:00:00'),
3: Timestamp('2022-03-09 00:00:00'),
4: Timestamp('2022-03-04 00:00:00'),
5: Timestamp('2022-02-28 00:00:00'),
6: Timestamp('2022-02-28 00:00:00'),
7: Timestamp('2022-02-27 00:00:00'),
8: Timestamp('2022-02-26 00:00:00'),
9: Timestamp('2022-02-23 00:00:00'),
10: Timestamp('2022-02-22 00:00:00'),
11: Timestamp('2022-02-21 00:00:00'),
               12: Timestamp('2022-02-19 00:00:00'),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 15:08:28 2019
@author: binbin
"""
## import some libraries ##
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import MinMaxScaler
from sklearn import preprocessing
from sklearn.neural_network import MLPRegressor
from sklearn import ensemble
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate, cross_val_predict
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import plotly.express as px
from scipy.stats import spearmanr
from scipy.cluster import hierarchy
from sklearn.inspection import permutation_importance
def draw_train_test_kde(feature_name,train_df,test_df):
fig, ax = plt.subplots(figsize=(6, 3))
sns.distplot(train_df[feature_name], color=sns.color_palette("coolwarm",5)[0], label='Train')
sns.distplot(test_df[feature_name], color=sns.color_palette("coolwarm",5)[4], label='Test')
ax.set_title('Comparison of the ' + feature_name + ' distribution', size=20);
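# Usage sketch (df_train/df_test are hypothetical DataFrames that both contain the
# column named by feature_name, e.g. 'Maximal Stress' as defined below):
# draw_train_test_kde('Maximal Stress', df_train, df_test)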
####################Data Preparation ##############################
df=pd.read_pickle('df.pkl')
## drop some features that were addtionally included and but not used ##
df=df.drop(['SVF Mean Y'],axis=1)
df=df.drop(['SVF Mean X'],axis=1)
df=df.drop(['SVF Mean Z'],axis=1)
df=df.drop(['SVF STD Y'],axis=1)
df=df.drop(['SVF STD X'],axis=1)
df=df.drop(['SVF STD Z'],axis=1)
df=df.drop(['Realsurface'],axis=1)
df=df.drop(['Thoughness'],axis=1)
#### some specific scalling parameters##
scale_para=30e7
FN_len=0.4
area=0.05*FN_len
para=scale_para/area
df['Strain to failure']=df['Strain to failure']*100/FN_len
df['Maximal Stress']=df['Maximal Stress']*para/1e6
df['Initial Effective Stiffness']=df['Initial Effective Stiffness']*para/(100/FN_len)
dataset2=df.copy()
##resemble and naming for the correlation after analysis of the heat plot ##
dataset2=dataset2.rename(columns={'Orientation STD': 'F-O STD', 'Length STD':'F-L STD',"Diameter STD": "F-D STD",
"Cont_Area_X_Mean":"C-ASD_X Mean",
"Cont_Area_X_STD":"C-ASD_X STD",
"Cont_Area_Y_Mean":"C-ASD_Y Mean",
"Cont_Area_Y_STD":"C-ASD_Y STD",
"Cont_Area_Z_Mean":"C-ASD_Z Mean",
"Cont_Area_Z_STD":"C-ASD_Z STD",
"Cont_Area_Normal_X_Mean":"C-ANO_X Mean",
"Cont_Area_Normal_X_STD":"C-ANO_X STD",
"Cont_Area_Normal_Y_Mean":"C-ANO_Y Mean",
"Cont_Area_Normal_Y_STD":"C-ANO_Y STD",
"Cont_Area_Normal_Z_Mean":"C-ANO_Z Mean",
"Cont_Area_Normal_Z_STD":"C-ANO_Z STD",
"Cont_Area_Size_Mean":"C-AS Mean",
"Cont_Area_Size_STD":"C-AS STD"})
dataset3=dataset2.copy()
## Drop ones with high correlation ##
dataset3=dataset3.drop(['C-ASD_Z Mean'],axis=1)
dataset3=dataset3.drop(['C-ANO_X STD'],axis=1)
dataset3=dataset3.drop(['C-ANO_Y STD'],axis=1)
dataset3=dataset3.drop(['C-AS STD'],axis=1)
dataset3=dataset3.drop(['C-ANO_Z STD'],axis=1)
dataset3=dataset3.drop(['C-ANO_Z Mean'],axis=1)
## setting the features for plotting purposes##
A=list(range(0,20))
targets_index=18
A.pop(targets_index)
X0=dataset2.iloc[:,0:17]
Y0=dataset2.iloc[:,17:20]
a = 13 ## choice of output to evaluate
X=dataset3.iloc[:,0:11]
y=dataset3.iloc[:,a]
###################### potting the correlation of the data #####################
##before selecting##
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
corr = spearmanr(X0).correlation
corr_linkage = hierarchy.ward(corr)
dendro = hierarchy.dendrogram(corr_linkage, labels=X0.columns, ax=ax1,
leaf_rotation=90)
dendro_idx = np.arange(0, len(dendro['ivl']))
clo=ax2.imshow(corr[dendro['leaves'], :][:, dendro['leaves']])
ax2.set_xticks(dendro_idx)
ax2.set_yticks(dendro_idx)
ax2.set_xticklabels(dendro['ivl'], rotation='vertical')
ax2.set_yticklabels(dendro['ivl'])
fig.tight_layout()
fig.colorbar(clo)
plt.savefig("Feature_Selection_before.svg",format="svg")
plt.show()
##after selection##
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
corr = spearmanr(X).correlation
corr_linkage = hierarchy.ward(corr)
dendro = hierarchy.dendrogram(corr_linkage, labels=X.columns, ax=ax1,
leaf_rotation=90)
dendro_idx = np.arange(0, len(dendro['ivl']))
clo=ax2.imshow(corr[dendro['leaves'], :][:, dendro['leaves']])
clo1=corr[dendro['leaves'], :][:, dendro['leaves']]
ax2.set_xticks(dendro_idx)
ax2.set_yticks(dendro_idx)
ax2.set_xticklabels(dendro['ivl'], rotation='vertical')
ax2.set_yticklabels(dendro['ivl'])
fig.tight_layout()
fig.colorbar(clo)
plt.savefig("Feature_Selection_after.svg",format="svg")
plt.show()
################# Permutation model training ####################
scalerSS= StandardScaler()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15,
random_state=10)
try:
y_train.shape[1]
except IndexError:
y_train=y_train.values.reshape(-1,1)
y_test=y_test.values.reshape(-1,1)
X_train=scalerSS.fit_transform(X_train)
X_test = scalerSS.transform(X_test)
y_train=scalerSS.fit_transform(y_train)
y_test=scalerSS.transform(y_test)
mlp= ensemble.GradientBoostingRegressor()
MLP=mlp.fit(X_train, y_train)
y_pred=mlp.predict(X_test)
mseMLP = mean_squared_error(y_test, y_pred)
print("MSE MLP: %.4f" % mseMLP)
print("Train R2 score MLP: {:.2f}".format(mlp.score(X_train, y_train)))
print("Test R2 score MLP: {:.2f}".format(mlp.score(X_test, y_test)))
score_MLP= cross_val_score(MLP, X_test, y_test,scoring='r2')
print("Test CV-error MLP: {:.2f}".format(score_MLP.mean()))
if a == 11:
result_strain = permutation_importance(mlp, X_test, y_test, n_repeats=100,
random_state=42, n_jobs=10)
print('strain is selected')
elif a == 12:
result_stress = permutation_importance(mlp, X_test, y_test, n_repeats=100,
random_state=42, n_jobs=10)
print('stress is selected')
elif a == 13:
result_stiff = permutation_importance(mlp, X_test, y_test, n_repeats=100,
random_state=42, n_jobs=10)
print('stiffness is selected')
## strain##
result_PI_strain = (100.0*(result_strain.importances/result_strain.importances.max()))
PI_to_Strain=result_strain.importances_mean
df_RI_strain = pd.DataFrame()
from warnings import catch_warnings
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
from pandas.util import testing as tm
@pytest.fixture
def single_level_multiindex():
"""single level MultiIndex"""
return MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexLoc(object):
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(
data=[0, 1, 2],
index=['A', 'B', 'C'],
dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_labels.loc['i']
with catch_warnings(record=True):
xp = mi_labels.ix['i']
tm.assert_frame_equal(rs, xp)
# 2nd (last) columns
rs = mi_labels.loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# with a tuple
rs = mi_labels.loc[('i', 'X')]
with catch_warnings(record=True):
xp = mi_labels.ix[('i', 'X')]
tm.assert_frame_equal(rs, xp)
rs = mi_int.loc[4]
with catch_warnings(record=True):
xp = mi_int.ix[4]
tm.assert_frame_equal(rs, xp)
# missing label
pytest.raises(KeyError, lambda: mi_int.loc[2])
with catch_warnings(record=True):
# GH 21593
pytest.raises(KeyError, lambda: mi_int.ix[2])
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
        df = DataFrame(df, columns=index)
def DeleteDuplicatedElementFromList(list):
resultList = []
for item in list:
if not item in resultList and str(item)!="nan":
resultList.append(item)
return resultList
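# Example (illustrative): keeps first occurrences in order and drops NaN-like items.
# DeleteDuplicatedElementFromList(["本地籍", "非本地籍", "本地籍", float("nan")])
# -> ['本地籍', '非本地籍']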
import pandas as pd
#coding:utf-8
import matplotlib.pyplot as plt
import numpy
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] =False
import numpy as np
from pandas import Series,DataFrame
data_train=pd.read_csv("D:\sufe\A\contest_basic_train.tsv", sep='\t')
print(data_train.info())
print(data_train.describe())
d1=pd.DataFrame(columns=['特征','未逾期比例','逾期比例'])
fig=plt.figure("Y值分类统计图")
fig.set(alpha=0.2)
data_train.Y.value_counts().plot(kind='bar')#我用柱状图
# plt.title(u"Y=0代表未逾期客户,Y=1代表逾期客户")
plt.ylabel(u"人数")
plt.savefig("Y值分类统计图.png")
fig=plt.figure('户籍统计情况图')
data_train.IS_LOCAL.value_counts().plot(kind='bar')
# plt.title(u"户籍情况")
plt.ylabel(u"人数")
plt.savefig("户籍统计情况图.png")
#属性对于目标变量的关联性统计(IS_LOCAL AGENT WORK_PROVINCE,EDU_LEVEL,MARRY_STATUS,SALATY,HAS_FUND)
#IS_LOCAL
Y1=data_train.IS_LOCAL[data_train.Y==0].value_counts()
Y2=data_train.IS_LOCAL[data_train.Y==1].value_counts()
df=pd.DataFrame({u'未逾期客户':Y1,u'逾期客户':Y2})
df.plot(kind='bar',stacked=True)
# plt.title(u"户籍因素中是否是逾期客户分析")
plt.xlabel(u"是否本地户籍")
plt.ylabel(u"人数")
print("本地与非本地因素中客户逾期分析")
print("未逾期客户人数\n",Y1)
print("逾期客户人数\n",Y2)
print("本地户籍中未逾期比例是",1.0*Y1["本地籍"]/(Y1["本地籍"]+Y2["本地籍"])," 逾期比例是:"
,1.0*Y2["本地籍"]/(Y1["本地籍"]+Y2["本地籍"]))
print("非本地户籍中逾期比例是",1.0*Y1["非本地籍"]/(Y1["非本地籍"]+Y2["非本地籍"])," 逾期比例是:"
,1.0*Y2["非本地籍"]/(Y1["非本地籍"]+Y2["非本地籍"]))
d1.loc["本地籍", "未逾期比例"]=1.0*Y1["本地籍"]/(Y1["本地籍"]+Y2["本地籍"])
d1.loc["本地籍", "逾期比例"]=1.0*Y2["本地籍"]/(Y1["本地籍"]+Y2["本地籍"])
d1.loc["非本地籍", "未逾期比例"]=1.0*Y1["非本地籍"]/(Y1["非本地籍"]+Y2["非本地籍"])
d1.loc["非本地籍", "逾期比例"]=1.0*Y2["非本地籍"]/(Y1["非本地籍"]+Y2["非本地籍"])
plt.savefig('户籍对于逾期的影响.png')
#WORK_PRO工作省份
Y3=data_train.WORK_PROVINCE[data_train.Y==0].value_counts()
Y4=data_train.WORK_PROVINCE[data_train.Y==1].value_counts()
df=pd.DataFrame({u'未逾期客户':Y3,u'逾期客户':Y4})
df.plot(kind='bar',stacked=True)
# plt.title(u"工作省份中是否是逾期客户分析")
plt.xlabel(u"工作省份")
plt.ylabel(u"人数")
plt.savefig('工作省份对于逾期的影响.png')
# listA=data_train["WORK_PROVINCE"]
# b=DeleteDuplicatedElementFromList(listA)
#
# print(b)
# for i in b:
# print(i,"中的未逾期比例",1.0*Y3[i]/(Y3[i]+Y4[i])," 逾期比例是",1.0*Y4[i]/(Y3[i]+Y4[i]))
#EDU_LEVEL
Y3=data_train.EDU_LEVEL[data_train.Y==0].value_counts()
Y4=data_train.EDU_LEVEL[data_train.Y==1].value_counts()
df=pd.DataFrame({u'未逾期客户':Y3,u'逾期客户':Y4})
df.plot(kind='bar',stacked=True)
# plt.title(u"学历与逾期客户分析")
plt.xlabel(u"学历")
plt.ylabel(u"人数")
print("学历与是否是逾期客户的分析")
print("未逾期客户人数:\n",Y3)
print("逾期客户人数:\n",Y4)
b=["专科","本科","高中","专科及以下","初中","其他","硕士研究生","博士研究生","硕士及以上"]
d2=pd.DataFrame(index=b,columns=['未逾期比例','逾期比例'])
for i in b:
    print(i,"中的未逾期比例",1.0*Y3[i]/(Y3[i]+Y4[i])," 逾期比例是",1.0*Y4[i]/(Y3[i]+Y4[i]))
    d2.loc[i,'未逾期比例']=1.0*Y3[i]/(Y3[i]+Y4[i])
    d2.loc[i,'逾期比例']=1.0*Y4[i]/(Y3[i]+Y4[i])
plt.savefig('学历的影响.png')
#Marry_STATUS
Y3=data_train.MARRY_STATUS[data_train.Y==0].value_counts()
Y4=data_train.MARRY_STATUS[data_train.Y==1].value_counts()
df=pd.DataFrame({u'未逾期客户':Y3,u'逾期客户':Y4})
df.plot(kind='bar',stacked=True)
# plt.title(u"婚姻情况与逾期客户分析")
plt.xlabel(u"婚姻情况")
plt.ylabel(u"人数")
print("婚姻情况与逾期情况分析")
print("未逾期客户人数:\n",Y3)
print("逾期客户人数:\n",Y4)
b=["丧偶","其他","已婚","未婚","离婚","离异"]
d3=pd.DataFrame(index=b,columns=['未逾期比例','逾期比例'])
for i in b:
    print(i,"中的未逾期比例",1.0*Y3[i]/(Y3[i]+Y4[i])," 逾期比例是",1.0*Y4[i]/(Y3[i]+Y4[i]))
    d3.loc[i,'未逾期比例']=1.0*Y3[i]/(Y3[i]+Y4[i])
    d3.loc[i,'逾期比例']=1.0*Y4[i]/(Y3[i]+Y4[i])
plt.savefig('婚姻的影响.png')
#has_fund
Y3=data_train.HAS_FUND[data_train.Y==0].value_counts()
Y4=data_train.HAS_FUND[data_train.Y==1].value_counts()
df = pd.DataFrame({u'未逾期客户': Y3, u'逾期客户': Y4})
# -*- coding: utf-8 -*-
"""coronasense_analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SptFyUf_Y4y1APZxBY-ZteB3q3mcQkPE
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
from matplotlib import cm
from matplotlib.colors import Normalize
from scipy.interpolate import interpn
df = pd.read_csv('data.csv')
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
df = df.set_index(pd.DatetimeIndex(df['timestamp']))
df = df[df['timestamp'] >= pd.Timestamp('2020/08/30')]
df = df[df['timestamp'] <= pd.Timestamp('2020/10/03')]
df = df[df['obj_score'] <= 39]
df = df[df['obj_score'] >= 33.5]
df['obj_score'] = df['obj_score'] + 0.1
print(df['timestamp'])
print(df.index)
def density_scatter( x , y, ax = None, sort = True, bins = 15, **kwargs ) :
if ax is None :
fig , ax = plt.subplots()
data , x_e, y_e = np.histogram2d( x, y, bins = bins, density = True)
z = interpn( ( 0.5*(x_e[1:] + x_e[:-1]) , 0.5*(y_e[1:]+y_e[:-1]) ) , data , np.vstack([x,y]).T , method = "splinef2d", bounds_error = False)
z[np.where(np.isnan(z))] = 0.0
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
ax.scatter( x, y, c=z, s=2.5, **kwargs )
norm = Normalize(vmin = np.min(z), vmax = np.max(z))
#cbar = fig.colorbar(cm.ScalarMappable(norm = norm), ax=ax)
#cbar.ax.set_ylabel('Density')
return ax
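# Usage sketch (x and y are equal-length 1-D arrays; the function returns the axis it
# drew on, so it can be combined with further plotting calls):
# ax = density_scatter(df['meteo_realtemp'].values, df['obj_score'].values, bins=20)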
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
plt.plot(df['timestamp'], df['obj_score'],'.', alpha=0.1, label='Forehead temperature')
df_mean = df.resample('D').apply({'obj_score':'mean'})
df_std = df.resample('D').apply({'obj_score':'std'})
plt.plot(df_mean.index+pd.Timedelta('0.5 day'), df_mean['obj_score'], label='Average over 24h')
plt.fill_between(df_mean.index+pd.Timedelta('0.5 day'), df_mean['obj_score'] - df_std['obj_score']/2, df_mean['obj_score'] + df_std['obj_score']/2,
color='gray', alpha=1.0)
# Set title and labels for axes
ax.set( ylabel="Forehead temp. (deg. C)", xlabel="Time (days)")
# Rotate tick marks on x-axis
#plt.setp(ax.get_xticklabels(), rotation=0)
frame1 = plt.gca()
#frame1.axes.xaxis.set_ticks([])
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df))
print("avg {} std {}".format(df['obj_score'].mean(), df['obj_score'].std()))
fig.subplots_adjust(bottom=0.2)
mean_raw = df['obj_score'].mean()
plt.tight_layout()
ax.set_ylim(33,39)
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
days = mdates.DayLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(days)
# format the coords message box
#ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
#fig.autofmt_xdate()
plt.savefig('all_data.png')
groups = df.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("Location A")
df_filtered = df[df['machine_id'].isin([4428])]
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("Location B")
df_filtered = df[df['machine_id'].isin([2952,3075,3690,3813,3936,4059,4182,4305])]
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
print("Location C")
df_filtered = df[df['machine_id'].isin([6396])]
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
more_than_37 = df['obj_score']
print(len(more_than_37[more_than_37 > more_than_37.mean() + 3*more_than_37.std()]))
df_filtered_notouch = df[df['meteo_realtemp'] > 0]
df_filtered_notouch = df_filtered_notouch[df_filtered_notouch['machine_id'].isin([3075, 3936, 4059, 5781, 4428, 5535, 7134, 2706, 5904, 6396])]
plt.rcParams["figure.dpi"] = 180
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df.hist(column=['obj_score'], bins=20, figsize=(6, 3), ax = plt.gca())
ax.set( ylabel="# Measurements", xlabel="Forehead temperature (degrees C)")
ax.set_title("")
ax.set_xlim(33,39)
# x coordinates for the lines
xcoords = [37.81]
# colors for the lines
colors = ['r']
for xc,c in zip(xcoords,colors):
plt.axvline(x=xc, label='Fever threshold (μ+3σ = {})'.format(xc), c=c)
fig.subplots_adjust(bottom=0.2)
plt.legend()
plt.tight_layout()
plt.savefig('hist_all.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_outside = df[df['meteo_realtemp'] > 0]
df_outside = df_outside[df_outside['machine_id'].isin([3075, 3936, 4059, 5781, 4428, 5535, 7134, 2706, 5904, 6396])]
linear_regressor = LinearRegression()
linear_regressor.fit(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_outside['meteo_realtemp'].min(), df_outside['meteo_realtemp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Outside temperature (deg. C)",
ylabel="Forehead temp. (deg. C)")
density_scatter(df_outside['meteo_realtemp'], df_outside['obj_score'], ax=ax,label='Forehead temperature')
plt.plot(X, Y_pred, 'red', label=r'Linear fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df_outside))
#plt.title("Effect of outside temperature on forehead temperature")
more_than_37 = df_outside['obj_score']
print(len(more_than_37[more_than_37 > more_than_37.mean() + 3*more_than_37.std()]))
plt.tight_layout()
plt.savefig('outside_forehead.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_outside = df[df['meteo_realtemp'] > 0]
df_outside = df_outside[df_outside['machine_id'].isin([3075, 3936, 4059, 5781, 4428, 5535, 7134, 2706, 5904, 6396])]
linear_regressor = LinearRegression()
linear_regressor.fit(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_outside['meteo_realtemp'].min(), df_outside['meteo_realtemp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Outside temperature (degrees C)",
ylabel="Forehead temp. (deg. C)")
curve = linear_regressor.predict(df_outside['meteo_realtemp'].values.reshape(-1, 1))[:,0]
density_scatter(df_outside['meteo_realtemp'], df_outside['obj_score']-curve+mean_raw,ax = ax, label='Forehead temperature')
ax.set_ylim(33,39)
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
print(len(df_outside))
#plt.title("Measurements corrected from outside temperature model")
new_df = df_outside.copy()
new_df['obj_score'] = new_df['obj_score']-curve+mean_raw
more_than_37 = df_outside['obj_score']-curve+mean_raw
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('outside_forehead_corr.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_ambient = df[df['amb_temp'] > 0]
linear_regressor = LinearRegression()
linear_regressor.fit(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_ambient['amb_temp'].min(), df_ambient['amb_temp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Ambient temperature (degrees C)",
ylabel="Forehead temp. (deg. C)")
ax.set_ylim(33,39)
density_scatter(df_ambient['amb_temp'], df_ambient['obj_score'],ax = ax, label='Forehead temperature')
plt.plot(X, Y_pred, 'red', label=r'Linear fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
#plt.title("Effect of ambient temperature on forehead temperature")
print(len(df_ambient))
plt.tight_layout()
plt.savefig('ambient_forehead.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_ambient = df[df['amb_temp'] > 0]
linear_regressor = LinearRegression()
linear_regressor.fit(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_ambient['amb_temp'].min(), df_ambient['amb_temp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Ambient temperature (degrees C)",
ylabel="Forehead temp. (deg. C)")
curve = linear_regressor.predict(df_ambient['amb_temp'].values.reshape(-1, 1))[:,0]
ax.set_ylim(33,39)
density_scatter(df_ambient['amb_temp'], df_ambient['obj_score']-curve+mean_raw,ax = ax, label='Forehead temperature')
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
#plt.title("Measurements corrected from ambient temperature model")
more_than_37 = df_ambient['obj_score']-curve+mean_raw
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('ambient_forehead_corr.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_ambient = new_df[new_df['amb_temp'] > 0]
linear_regressor = LinearRegression()
linear_regressor.fit(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_ambient['amb_temp'].min(), df_ambient['amb_temp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Ambient temperature (degrees C)",
ylabel="Forehead temp. (degrees C)")
curve = linear_regressor.predict(df_ambient['amb_temp'].values.reshape(-1, 1))[:,0]
ax.set_ylim(33,39)
density_scatter(df_ambient['amb_temp'], df_ambient['obj_score']-curve+mean_raw,ax=ax, label='Forehead temperature')
#plt.plot(df_ambient['amb_temp'], df_ambient['obj_score'],'.', alpha=0.1, label='Forehead temperature')
#plt.plot(X, Y_pred, 'red', label=r'Linear fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
#plt.title("Measurements corrected from outside + ambient models")
more_than_37 = df_ambient['obj_score']-curve+mean_raw
new_df_all = df_ambient.copy()
new_df_all['obj_score'] = new_df_all['obj_score']-curve+mean_raw
print(len(df_ambient))
print(len(new_df_all))
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('foreheah_both_corr.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_hours = df
df_hours['hours'] = df_hours.index.hour
linear_regressor = make_pipeline(
PolynomialFeatures(degree=2),
LinearRegression()
)
linear_regressor.fit(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_hours['hours'].min(), df_hours['hours'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set_ylim(33,39)
ax.set(xlabel="Hours in the day (GMT)",
ylabel="Forehead temp. (deg. C)")
by_hour = df_hours.groupby(df_hours.index.hour+2).mean()
by_hour_std = df_hours.groupby(df_hours.index.hour+2).std()
density_scatter(df_hours.index.hour, df_hours['obj_score'],ax=ax,label='Forehead temperature')
plt.plot(by_hour.index, by_hour['obj_score'],'-', alpha=1.0, label='Temperature average')
plt.fill_between(by_hour.index, by_hour['obj_score'] - by_hour_std['obj_score']/2, by_hour['obj_score'] + by_hour_std['obj_score']/2,
color='gray', alpha=0.3)
plt.xlim(4.1,21)
plt.plot(X, Y_pred, 'red', label=r'Polynomial fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df_hours))
#plt.title("Effect of time in the day on forehead temperature")
#more_than_37 = df_outside['obj_score']
#print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('timeday.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_hours = new_df_all
df_hours['hours'] = df_hours.index.hour
linear_regressor = make_pipeline(
PolynomialFeatures(degree=2),
LinearRegression()
)
linear_regressor.fit(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_hours['hours'].min(), df_hours['hours'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set_ylim(33,39)
curve = linear_regressor.predict(df_hours['hours'].values.reshape(-1, 1))[:,0]
by_hour = df_hours.groupby(df_hours.index.hour+2).mean()
by_hour_std = df_hours.groupby(df_hours.index.hour+2).std()
df_hours['obj_score'] = df_hours['obj_score']-curve+df_hours['obj_score'].mean()
plt.plot(df_hours['timestamp'], df_hours['obj_score'],'.', alpha=0.1, label='Forehead temperature')
df_mean = df_hours.resample('D').apply({'obj_score':'mean'})
df_std = df_hours.resample('D').apply({'obj_score':'std'})
plt.plot(df_mean.index + pd.Timedelta('0.5 day'), df_mean['obj_score'], label='Average over 24h')
#!/usr/bin/env python
# coding: utf-8
# # import required library
# In[1]:
# Import numpy, pandas for data manipulation
import numpy as np
import pandas as pd
# Import matplotlib, seaborn for visualization
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# In[2]:
# Import the data
weather_data = pd.read_csv('weather.csv')
weather_data.head()
# In[8]:
rain_df = weather_data[['Date','Rainfall']]
rain_df.head()
# In[9]:
rain_df.shape
# In[10]:
rain_df.info()
# **Using 50 values**
# In[15]:
rain_df = rain_df.loc[:49]
rain_df.head()
# In[16]:
rain_df.shape
# In[17]:
# Convert the time column into datetime
rain_df['Date'] = pd.to_datetime(rain_df['Date'])
rain_df['Date'].head()
# In[18]:
rain_df.info()
# In[24]:
# fill the empty row
rain_df = rain_df.fillna(rain_df['Rainfall'].mean())
rain_df.head()
# ### Dataset Explanation
# In[27]:
rain_df.describe()
# In[29]:
# Output the maximum and minimum rain date
print(rain_df.loc[rain_df["Rainfall"] == rain_df["Rainfall"].max()])
print(rain_df.loc[rain_df["Rainfall"] == rain_df["Rainfall"].min()])
# In[30]:
# Reset the index
rain_df.set_index("Date", inplace=True)
# ### Data Visualization
# In[32]:
# Plot the daily temperature change
plt.figure(figsize=(16,10), dpi=100)
plt.plot(rain_df.index, rain_df.Rainfall, color='tab:red')
plt.gca().set(title="Daily Rain", xlabel='Date', ylabel="rain value")
plt.show()
# In[35]:
# Apply a moving average with a rolling window of 10 days.
rain_df_mean = rain_df.Rainfall.rolling(window=10).mean()
rain_df_mean.plot(figsize=(16,10))
plt.show()
# In[37]:
from statsmodels.tsa.seasonal import seasonal_decompose
# Additive Decomposition
result_add = seasonal_decompose(rain_df.Rainfall, model='additive', extrapolate_trend=0)
# Plot
plt.rcParams.update({'figure.figsize': (10,10)})
result_add.plot().suptitle('Additive Decomposition', fontsize=22)
plt.show()
# ### Baseline Model
# In[38]:
# Persistence baseline: shift the series by one day so yesterday's rainfall becomes today's prediction.
predicted_df = rain_df["Rainfall"].to_frame().shift(1).rename(columns = {"Rainfall": "rain_pred" })
actual_df = rain_df["Rainfall"].to_frame().rename(columns = {"Rainfall": "rain_actual" })
# Concatenate the actual and predicted rain
one_step_df = pd.concat([actual_df, predicted_df], axis=1)
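# Baseline check sketch (assumes one_step_df from above; the first row is NaN after the
# shift, so it is dropped before scoring):
# from sklearn.metrics import mean_squared_error
# baseline = one_step_df.dropna()
# print(mean_squared_error(baseline["rain_actual"], baseline["rain_pred"]))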
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 10:50:38 2021
@author: github.com/sahandv
take ideas from:
https://towardsdatascience.com/multi-class-text-classification-with-lstm-1590bee1bd17
https://github.com/susanli2016/NLP-with-Python/blob/master/Multi-Class%20Text%20Classification%20LSTM%20Consumer%20complaints.ipynb
https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/
"""
import sys
import gc
import pandas as pd
import numpy as np
import networkx as nx
import karateclub as kc
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras as keras
from keras.wrappers.scikit_learn import KerasClassifier
from keras import backend as K
from keras.models import Model
from keras.layers import Dense, Input, Dropout, concatenate
from keras.models import Sequential, load_model
from keras.optimizers import SGD, Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.utils.np_utils import to_categorical# from tensorflow.contrib.keras import layers
from keras.utils.vis_utils import plot_model
from tensorflow.keras.layers import BatchNormalization
from keras_visualizer import visualizer
from tensorflow.keras.callbacks import TensorBoard
pretrain = True
get_output = True
# read labels
datapath = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/'
# datapath = '/home/sahand/GoogleDrive/Data/'
data_dir = datapath+'Corpus/cora-classify/cora/'
label_address = data_dir+'clean/single_component_small_18k/labels'
labels = pd.read_csv(label_address)
import pandas._libs.tslibs.nattype
from sklearn import linear_model
from sklearn.metrics import r2_score
import numpy as np
import pandas as pd
from math import log, isnan
from statistics import stdev
from numpy import repeat
from strategy import *
def calc_features(ivv_hist, bonds_hist, n_vol):
# Takes in:
# ivv_hist, a pandas dataframe of OHLC data with a Date column
# bonds_hist, a CMT rates dataframe of this form:
# https://www.treasury.gov/resource-center/data-chart-center
# /interest-rates/pages/textview.aspx?data=yield.
# n_vol: number of trading days over which vol for IVV is to be calculated
# This function is what we'll apply to every row in bonds_hist.
def bonds_fun(yields_row):
        maturities = pd.DataFrame([1 / 12, 2 / 12, 3 / 12, 6 / 12, 1, 2])
from typing import List
import matplotlib.pyplot as plt
import numbers
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.metrics import auc, plot_roc_curve, roc_curve, RocCurveDisplay
from sklearn.model_selection import KFold, LeaveOneOut, GroupKFold, LeaveOneGroupOut
from sklearn.preprocessing import label_binarize
from matplotlib import colors
import copy
import decimal
from .multiclass_fitparams import OneVsRestClassifierPatched
from ._cv_eval_set import init_eval_set, _make_transformer, _eval_set_selection
class classplot():
# 散布図カラーリスト
_SCATTER_COLORS = ['green', 'red', 'mediumblue', 'brown', 'darkmagenta', 'darkorange', 'gold', 'grey']
# クラス確率図カラーマップ
_PROB_CMAP = ['Greens', 'Reds', 'Blues', 'YlOrBr', 'Purples', 'OrRd', 'Wistia', 'Greys']
# デフォルトでの決定境界図の透明度(alpha)
_DEFAULT_SEPARATOR_ALPHA = 0.3
# デフォルトでのクラス確率図等高線モードの透明度(alpha)
_DEFAULT_PROBA_CONTOURF_ALPHA = 0.5
# デフォルトでのクラス確率図透明度補正シグモイド関数のゲイン
_DEFAULT_PROBA_CONTOURF_SIG_GAIN = 0.5
# デフォルトでのクラス確率図の等高線段階数
_DEFAULT_PROBA_CONTOURF_LEVELS = 10
# デフォルトでのクラス確率図RGB画像モードの透明度(alpha)
_DEFAULT_PROBA_RGB_ALPHA = 0.45
def _round_digits(src: float, rounddigit: int = None, method='decimal'):
"""
指定桁数で小数を丸める
Parameters
----------
src : float
丸め対象の数値
        rounddigit : int
            丸める桁数
        method : str
            桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定)
"""
if method == 'decimal':
return round(src, rounddigit)
elif method == 'sig':
with decimal.localcontext() as ctx:
ctx.prec = rounddigit
return ctx.create_decimal(src)
elif method == 'format':
return '{:.{width}g}'.format(src, width=rounddigit)
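    # Usage sketch (illustrative): classplot._round_digits(3.14159, 2, 'decimal') -> 3.14,
    # classplot._round_digits(3.14159, 3, 'sig') -> Decimal('3.14'),
    # classplot._round_digits(3.14159, 3, 'format') -> '3.14'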
def _reshape_input_data(x, y, data, x_colnames, cv_group):
"""
入力データの形式統一(pd.DataFrame or np.ndarray)
"""
# dataがpd.DataFrameのとき
if isinstance(data, pd.DataFrame):
if not isinstance(x, list):
raise Exception('`x` argument should be list[str] if `data` is pd.DataFrame')
if not isinstance(y, str):
raise Exception('`y` argument should be str if `data` is pd.DataFrame')
if x_colnames is not None:
raise Exception('`x_colnames` argument should be None if `data` is pd.DataFrame')
X = data[x].values
y_true = data[y].values
x_colnames = x
y_colname = y
cv_group_colname = cv_group
# dataがNoneのとき(x, y, cv_groupがnp.ndarray)
elif data is None:
if not isinstance(x, np.ndarray):
raise Exception('`x` argument should be np.ndarray if `data` is None')
if not isinstance(y, np.ndarray):
raise Exception('`y` argument should be np.ndarray if `data` is None')
X = x if len(x.shape) == 2 else x.reshape([x.shape[0], 1])
y_true = y.ravel()
# x_colnameとXの整合性確認
if x_colnames is None:
x_colnames = list(range(X.shape[1]))
elif X.shape[1] != len(x_colnames):
raise Exception('width of X must be equal to length of x_colnames')
else:
x_colnames = x_colnames
y_colname = 'objective_variable'
if cv_group is not None: # cv_group指定時
cv_group_colname = 'group'
data = pd.DataFrame(np.column_stack((X, y_true, cv_group)),
columns=x_colnames + [y_colname] + [cv_group_colname])
else:
cv_group_colname = None
data = pd.DataFrame(np.column_stack((X, y)),
columns=x_colnames + [y_colname])
else:
raise Exception('`data` argument should be pd.DataFrame or None')
return X, y_true, data, x_colnames, y_colname, cv_group_colname
@classmethod
def _chart_plot_2d(cls, trained_clf, x_chart, y_true_col, y_pred_col, data, x_chart_indices,
x1_start, x1_end, x2_start, x2_end, other_x, chart_scale,
proba_pred_col, proba_class_indices, ax, plot_border, plot_scatter,
scatter_color_dict, scatter_marker_dict, proba_cmap_dict, proba_type,
contourf_kws=None, imshow_kws=None, scatter_kws=None, legend_kws=None):
"""
分類チャート(決定境界図 or クラス確率図)と各種散布図の表示
(class_separator_plotあるいはclass_prob_plotメソッドの描画処理部分)
"""
# 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用
if ax is None:
ax=plt.gca()
# 図のサイズからグリッド数を取得
xnum, ynum = plt.gcf().dpi * plt.gcf().get_size_inches()
# チャート用グリッドデータを作成
xx = np.linspace(x1_start, x1_end, num=int(xnum/chart_scale))
yy = np.linspace(x2_start, x2_end, num=int(ynum/chart_scale))
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
# 推論用に全説明変数を保持したndarrayを作成 (チャート非使用変数は固定値other_xとして追加)
n_rows = X_grid.shape[0]
X_all = []
other_add_flg = False
for i in range(2 + len(other_x)):
if i == x_chart_indices[0]: # チャート使用変数(1個目)を追加
X_all.append(X_grid[:, 0].reshape(n_rows, 1))
elif i == x_chart_indices[1]: # チャート使用変数(2個目)を追加
X_all.append(X_grid[:, 1].reshape(n_rows, 1))
elif len(other_x) >= 1 and not other_add_flg: # チャート非使用変数(1個目)を固定値として追加
X_all.append(np.full((n_rows, 1), other_x[0]))
other_add_flg = True
elif len(other_x) == 2: # チャート非使用変数(2個目)を固定値として追加
X_all.append(np.full((n_rows, 1), other_x[1]))
X_all = np.hstack(X_all)
# グリッドデータに対して推論し、推定値を作成
y_pred_grid = trained_clf.predict(X_all)
# 推定値をint型に変換
class_int_dict = dict(zip(scatter_color_dict.keys(), range(len(scatter_color_dict))))
y_pred_grid_int = np.vectorize(lambda x: class_int_dict[x])(y_pred_grid)
# グリッドデータをピボット化
y_pred_pivot = y_pred_grid_int.reshape(X1.shape)
# 決定境界図をプロット
if proba_pred_col is None:
# 決定境界色分けプロット
ax.contourf(X1, X2, y_pred_pivot,
levels=np.arange(y_pred_pivot.max() + 2) - 0.5,
**contourf_kws)
# クラス確率図をプロット
else:
# クラス数
nclass = len(proba_class_indices)
# グリッドデータに対してクラス確率算出
y_proba_grid = trained_clf.predict_proba(X_all)[:, proba_class_indices]
# contourfで等高線プロット(塗りつぶしあり)するとき
if proba_type == 'contourf':
# alpha値を保持(描画終了後に更新前に戻すため)
src_alpha = contourf_kws['alpha']
# シグモイド関数(クラス数1のときalphaで、クラス数∞のとき1に漸近)でalphaを補正
contourf_kws['alpha'] = 2*(1-src_alpha)/(1+np.exp(-cls._DEFAULT_PROBA_CONTOURF_SIG_GAIN*(nclass-1)))+2*src_alpha-1
# クラスごとに処理
for i in range(nclass):
# グリッドデータから該当クラスのみ抜き出してピボット化
y_proba_pivot = y_proba_grid[:, i].reshape(X1.shape)
# カラーマップをproba_cmap_dictの値から取得
cmap = list(proba_cmap_dict.values())[i]
# クラス確率図プロット
ax.contourf(X1, X2, y_proba_pivot,
cmap=cmap,
**contourf_kws)
# alpha値を更新(alpha/(1+alpha))
old_alpha = contourf_kws['alpha']
contourf_kws['alpha'] = old_alpha / (1 + old_alpha)
# alpha値を更新前に戻す
contourf_kws['alpha'] = src_alpha
# Unfilled contour lines with contour
elif proba_type == 'contour':
# Process each class
for i in range(nclass):
# Extract this class from the grid data and pivot to the mesh shape
y_proba_pivot = y_proba_grid[:, i].reshape(X1.shape)
# Get the colormap from proba_cmap_dict
cmap = list(proba_cmap_dict.values())[i]
#c=list(scatter_color_dict.values())[proba_class_indices[i]]
ax.contour(X1, X2, y_proba_pivot,
cmap=cmap,
**contourf_kws)
# RGB image plot with imshow
elif proba_type == 'imshow':
# Initialize each RGB channel with zeros
proba_g = np.zeros(X1.shape) # green
proba_r = np.zeros(X1.shape) # red
proba_b = np.zeros(X1.shape) # blue
# For each class whose colormap is Greens, Reds, or Blues, store its probabilities in that channel
for i, cmap in enumerate(proba_cmap_dict.values()):
if cmap == 'Greens':
proba_g = y_proba_grid[:, i].reshape(X1.shape)
elif cmap == 'Reds':
proba_r = y_proba_grid[:, i].reshape(X1.shape)
elif cmap == 'Blues':
proba_b = y_proba_grid[:, i].reshape(X1.shape)
else:
# For imshow, colormaps other than Greens, Reds, Blues raise an error (four or more classes cannot be drawn)
raise Exception('only "Greens", "Reds" and "Blues" cmaps are allowed if the "proba_type" argument is "imshow"')
# Stack the RGB channels and flip vertically
im_grid = np.flip(np.stack([proba_r, proba_g, proba_b], 2), axis=0)
# Plot the RGB image
ax.imshow(im_grid,
aspect="auto", extent=(x1_start, x1_end, x2_start, x2_end),
**imshow_kws)
else:
raise Exception('the "proba_type" argument must be "contourf", "contour" or "imshow"')
# Plot the boundary lines
if plot_border:
ax.contour(X1, X2, y_pred_pivot,
levels=np.arange(y_pred_pivot.max() + 2) - 0.5,
colors='k',
linewidths=0.5,
antialiased=True)
# Plot the scatter points
if plot_scatter is not None:
# If no marker edge color is specified, default to dimgrey
if 'edgecolors' not in scatter_kws.keys():
scatter_kws['edgecolors'] = 'dimgrey'
# Flag whether each prediction is correct
data['error'] = (data[y_true_col] == data[y_pred_col])
# Color coding
if plot_scatter == 'error': # color by correct/incorrect
cdict = {True:'blue', False:'red'}
for name, group in data.groupby('error'):
ax.scatter(group[x_chart[0]].values, group[x_chart[1]].values,
label=name, c=cdict[name],
marker=scatter_marker_dict[name],
**scatter_kws)
elif plot_scatter == 'class': # color by class
for name, group in data.groupby(y_true_col):
ax.scatter(group[x_chart[0]].values, group[x_chart[1]].values,
label=name, c=scatter_color_dict[name],
**scatter_kws)
elif plot_scatter == 'class_error': # color by class and correctness
for name, group in data.groupby([y_true_col, 'error']):
ax.scatter(group[x_chart[0]].values, group[x_chart[1]].values,
label=f'{name[0]} {name[1]}', c=scatter_color_dict[name[0]],
marker=scatter_marker_dict[name[1]],
**scatter_kws)
# Show the legend
ax.legend(**legend_kws)
# Add axis labels
ax.set_xlabel(x_chart[0])
ax.set_ylabel(x_chart[1])
@classmethod
def _class_chart_plot(cls, trained_clf, X, y_pred, y_true, x_chart, x_not_chart, x_chart_indices,
pair_sigmarange=2.0, pair_sigmainterval=0.5, chart_extendsigma=0.5, chart_scale=1,
proba_pred = None, proba_class_indices = None, plot_border=True, plot_scatter='class',
scatter_color_dict=None, scatter_marker_dict=None, proba_cmap_dict=None, proba_type=None,
rounddigit_x3=None, cv_index=None,
subplot_kws=None, contourf_kws=None, imshow_kws=None, scatter_kws=None, legend_kws=None):
"""
Branching logic for displaying the classification chart (decision boundary or class-probability map) according to the number of explanatory variables
(the part of class_separator_plot / class_prob_plot that splits the data according to the number of explanatory variables).
"""
# Number of explanatory variables
x_num = X.shape[1]
# DataFrame of the variables used in the chart (decision boundary or class-probability map)
df_chart = pd.DataFrame(X[:, x_chart_indices], columns=x_chart)
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 10:52:09 2019
@author: <NAME>
"""
import requests, smtplib, os, datetime
import pandas as pd
from bs4 import *
import urllib.request as ur
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from matplotlib import pyplot as plt
# Specify the beginning and end of the time frame of possible dates as YYYYMMDD
timeframe_begin = 20200601
timeframe_end = 20200830
def scrape_kayak(start='', end='', airport = 'BER'):
"""
This function scrapes flight information from the kayak explore page.
Parameters:
start, end, airport - integer representing earliest possible departure date
in YYYYMMDD format, integer representing latest return date, string with
three letter code for starting airport. When both start and end are
left blank, results are returned from present date to one year in the
future.
Returns:
df - a data frame containing all destination cities and corresponding
flight information returned by the scrape
"""
# Format the beginning and end dates to insert them into the URL
start = '&depart=' + str(start)
end = '&return=' + str(end)
url = "https://www.kayak.com/s/horizon/exploreapi/elasticbox?airport=" + airport + "&v=1" + start + end + \
"&stopsFilterActive=false&duration=&budget=&topRightLat=68.58212830775821&topRightLon=180&bottomLeftLat=-6.168763628541718&bottomLeftLon=-180&zoomLevel=2"
response = requests.post(url).json()
df = pd.DataFrame(columns=['City', 'Country', 'Price', 'Airline', 'Airport', 'Date', 'Link'])
for i in range(len(response['destinations'])):
destination = response['destinations'][i]
row = list([destination['city']['name'], destination['country']['name'],
destination['flightInfo']['price'], destination['airline'],
destination['airport']['shortName'], pd.to_datetime(destination['departd']),
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
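# e.g. (illustrative of the tests below): Period(freq='D', year=2007, month=1, day=1)
# has .year == 2007, .quarter == 1 and .weekday == 0 (2007-01-01 was a Monday).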
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
class TestPeriodIndex(TestCase):
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
self.assert_(isinstance(series, TimeSeries))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp('D', 'end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
result = series.to_timestamp('D', 'start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
def test_constructor(self):
ii = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 9)
ii = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 4 * 9)
ii = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 12 * 9)
ii = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 365 * 9 + 2)
ii = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 261 * 9)
ii = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(ii), 365 * 24)
ii = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(ii), 24 * 60)
ii = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(ii), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.kde.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
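# Hypothetical usage sketch (the sample data below is an assumption, not from the original module):
#   chart = RelativeFrequencyChart()
#   x, y = chart.get_coordinates(pandas.Series([1, 2, 2, 3, 5, 8]), bins=4)
#   # x holds the bin edges and y the relative frequency of each bin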
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
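# Note (descriptive comment added): the returned dict has the form
# {'period': step_size, 'dynamic bugs': [x_dates, y_cumulative_counts]}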
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # splits the DataFrame into the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
# if the period starts before the earliest defect date, start counting from that earliest date instead
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the latest defect date falls beyond the last full period
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# process the remaining days that fall outside the full periods
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '6M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
from __future__ import print_function
import argparse
import math
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from cnvrg import Experiment
from sklearn.metrics import mean_squared_error
tf.disable_v2_behavior()
import psutil
import time
tic = time.time()
parser = argparse.ArgumentParser(description="""Preprocessor""")
parser.add_argument('-f', '--train_file', action='store', dest='train_file',
default='/data/movies_rec_sys/train_whole.csv', required=True, help="""training_file""")
parser.add_argument('--test_file', action='store', dest='test_file', default='/data/movies_rec_sys/test_whole.csv',
required=True, help="""test_file""")
parser.add_argument('--num_of_steps_1', action='store', dest='num_of_steps_1', default=100, required=True,
help="""number of iterations""")
parser.add_argument('--embed_dim_1', action='store', dest='embed_dim_1', default=50, required=True,
help="""number of factors""")
parser.add_argument('--reg_coef', action='store', dest='reg_coef', default=0.02, required=True,
help="""regularization coefficient""")
parser.add_argument('--threshold', action='store', dest='threshold', default=0.8, required=True,
help="""threshold for choosing recommendations""")
parser.add_argument('--precision_at_value', action='store', dest='precision_at_value', default=10, required=True,
help="""precision and recall at k""")
args = parser.parse_args()
train_file = args.train_file
test_file = args.test_file
n_iters_1 = int(args.num_of_steps_1)
n_factors_1 = int(args.embed_dim_1)
reg_coef_1 = float(args.reg_coef)
threshold = float(args.threshold)
K = int(args.precision_at_value)
hyp = pd.DataFrame(columns=['dimension', 'reg_coef'])
hyp.at[0, 'dimension'] = n_factors_1
hyp.at[0, 'reg_coef'] = reg_coef_1
hyp.to_csv('hyp.csv')
hyp_file = 'hyp1.csv'
hyp.to_csv("/cnvrg/{}".format(hyp_file), index=False)
train_whole = pd.read_csv(train_file)
test_whole = pd.read_csv(test_file)
# Copyright 2019 Toyota Research Institute. All rights reserved.
"""Unit tests related to batch validation"""
import json
import os
import unittest
import pandas as pd
import numpy as np
import boto3
from botocore.exceptions import NoRegionError, NoCredentialsError
from monty.tempfile import ScratchDir
from beep.validate import ValidatorBeep, validate_file_list_from_json, \
SimpleValidator
from beep import S3_CACHE, VALIDATION_SCHEMA_DIR
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
@unittest.skip
class ValidationArbinTest(unittest.TestCase):
def setUp(self):
# Setup events for testing
try:
kinesis = boto3.client('kinesis')
response = kinesis.list_streams()
self.events_mode = "test"
except (NoRegionError, NoCredentialsError) as e:
self.events_mode = "events_off"
def test_validation_arbin_bad_index(self):
path = "2017-05-09_test-TC-contact_CH33.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, index_col=0)
self.assertFalse(v.validate_arbin_dataframe(df))
self.assertEqual(v.errors['cycle_index'][0][0][0], 'must be of number type')
# Test bigger file
path = "2017-08-14_8C-5per_3_47C_CH44.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, index_col=0)
self.assertFalse(v.validate_arbin_dataframe(df))
self.assertEqual(v.errors['cycle_index'][0][0][0], 'must be of number type')
def test_validation_arbin_bad_data(self):
path = "2017-12-04_4_65C-69per_6C_CH29.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, index_col=0)
self.assertTrue(v.validate_arbin_dataframe(df))
# Alter the schema on-the-fly to induce error
v.schema['discharge_capacity']['schema']['max'] = 1.8
self.assertFalse(v.validate_arbin_dataframe(df, schema=v.schema))
self.assertEqual(v.errors['discharge_capacity'][0][11264][0], 'max value is 1.8')
# Alter the schema on-the-fly to move on to the next errors
v.schema['discharge_capacity']['schema']['max'] = 2.1
v.schema['step_time'] = {"schema": {"min": 0.0, "type": "float"},
"type": "list"}
self.assertFalse(v.validate_arbin_dataframe(df, schema=None))
self.assertEqual(v.errors['step_time'][0][206][0], 'min value is 0.0')
# Alter schema once more to recover validation
del v.schema['step_time']['schema']['min']
self.assertTrue(v.validate_arbin_dataframe(df, schema=None))
def test_validation_many_from_paths(self):
paths = ["2017-05-09_test-TC-contact_CH33.csv",
"2017-12-04_4_65C-69per_6C_CH29.csv"]
paths = [os.path.join(TEST_FILE_DIR, path) for path in paths]
v = ValidatorBeep()
temp_records = os.path.join(TEST_FILE_DIR, 'temp_records.json')
with open(temp_records, 'w') as f:
f.write("{}")
results = v.validate_from_paths(paths, record_results=False)
self.assertFalse(results["2017-05-09_test-TC-contact_CH33.csv"]["validated"])
errmsg = results["2017-05-09_test-TC-contact_CH33.csv"]["errors"]['cycle_index'][0][0][0]
self.assertEqual(errmsg, 'must be of number type')
self.assertTrue(results["2017-12-04_4_65C-69per_6C_CH29.csv"]["validated"])
v.validate_from_paths(paths, record_results=True, record_path=temp_records)
with open(temp_records, 'r') as f:
results_form_rec = json.load(f)
self.assertFalse(results_form_rec["2017-05-09_test-TC-contact_CH33.csv"]["validated"])
results = v.validate_from_paths(paths, record_results=True, skip_existing=True,
record_path=temp_records)
self.assertEqual(results, {})
@unittest.skip
def test_bad_file(self):
paths = ["2017-08-14_8C-5per_3_47C_CH44.csv"]
paths = [os.path.join(TEST_FILE_DIR, path) for path in paths]
v = ValidatorBeep()
results = v.validate_from_paths(paths, record_results=False)
def test_validation_from_json(self):
with ScratchDir('.'):
os.environ['BEEP_ROOT'] = os.getcwd()
os.mkdir("data-share")
os.mkdir(os.path.join("data-share", "validation"))
paths = ["2017-05-09_test-TC-contact_CH33.csv",
"2017-12-04_4_65C-69per_6C_CH29.csv"]
paths = [os.path.join(TEST_FILE_DIR, path) for path in paths]
# Create dummy json obj
json_obj = {
"mode": self.events_mode,
"file_list": paths,
'run_list': list(range(len(paths)))
}
json_string = json.dumps(json_obj)
json_output = validate_file_list_from_json(json_string)
loaded = json.loads(json_output)
self.assertEqual(loaded['validity'][0], 'invalid')
self.assertEqual(loaded['validity'][1], 'valid')
class ValidationMaccorTest(unittest.TestCase):
# To further develop as Maccor data / schema becomes available
def setUp(self):
# Setup events for testing
try:
kinesis = boto3.client('kinesis')
response = kinesis.list_streams()
self.events_mode = "test"
except (NoRegionError, NoCredentialsError) as e:
self.events_mode = "events_off"
def test_validation_maccor(self):
path = "xTESLADIAG_000019_CH70.070"
path = os.path.join(TEST_FILE_DIR, path)
v = SimpleValidator(schema_filename=os.path.join(VALIDATION_SCHEMA_DIR, "schema-maccor-2170.yaml"))
v.allow_unknown = True
header = pd.read_csv(path, delimiter='\t', nrows=0)
print(header)
df = pd.read_csv(path, delimiter='\t', skiprows=1)
df['State'] = df['State'].astype(str)
df['current'] = df['Amps']
print(df.dtypes)
validity, reason = v.validate(df)
print(validity, reason)
self.assertTrue(validity)
def test_validate_from_paths_maccor(self):
paths = [os.path.join(TEST_FILE_DIR, "xTESLADIAG_000019_CH70.070")]
# Run validation on everything
v = SimpleValidator()
validate_record = v.validate_from_paths(paths, record_results=True,
skip_existing=False)
df = pd.DataFrame(v.validation_records)
df = df.transpose()
print(df)
print(df.loc["xTESLADIAG_000019_CH70.070", :])
self.assertEqual(df.loc["xTESLADIAG_000019_CH70.070", "method"], "simple_maccor")
self.assertEqual(df.loc["xTESLADIAG_000019_CH70.070", "validated"], True)
class ValidationEisTest(unittest.TestCase):
# To further develop
def setUp(self):
pass
def test_validation_maccor(self):
path = "maccor_test_file_4267-66-6519.EDA0001.041"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, delimiter='\t', skip_blank_lines=True, skiprows=10)
self.assertTrue(v.validate_eis_dataframe(df))
class SimpleValidatorTest(unittest.TestCase):
def setUp(self):
# Setup events for testing
try:
kinesis = boto3.client('kinesis')
response = kinesis.list_streams()
self.events_mode = "test"
        except (NoRegionError, NoCredentialsError):
self.events_mode = "events_off"
def test_file_incomplete(self):
path = "FastCharge_000025_CH8.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = SimpleValidator()
df = pd.read_csv(path, index_col=0)
validity, reason = v.validate(df)
self.assertFalse(validity)
self.assertEqual(
reason, "cycle_index needs to reach at least 1 "
"for processing, instead found:value=0.0")
def test_basic(self):
path = "2017-05-09_test-TC-contact_CH33.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = SimpleValidator()
df = pd.read_csv(path, index_col=0)
validity, reason = v.validate(df)
self.assertFalse(validity)
self.assertEqual(
reason, "Column cycle_index: integer type check failed "
"at index 0 with value nan")
# Test bigger file, with float/numeric type
path = "2017-08-14_8C-5per_3_47C_CH44.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = SimpleValidator()
df = pd.read_csv(path, index_col=0)
v.schema['cycle_index']['schema']['type'] = 'float'
validity, reason = v.validate(df)
self.assertTrue(validity)
self.assertEqual(reason, '')
v.schema['cycle_index']['schema']['type'] = 'numeric'
validity, reason = v.validate(df)
self.assertTrue(validity)
self.assertEqual(reason, '')
# Test good file
path = "2017-12-04_4_65C-69per_6C_CH29.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = SimpleValidator()
df = | pd.read_csv(path, index_col=0) | pandas.read_csv |
import os
import numpy as np
import random
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def read_data(data_dir, symbol, dates):
df = pd.DataFrame(index=dates)
new_df = pd.read_csv(data_dir+ "hkex_" + symbol +".csv", index_col='Date', parse_dates=True, usecols=['Date', 'Close'], na_values=['nan'])
new_df = new_df.rename(columns={'Close': symbol})
df = df.join(new_df)
# data pre-processing
df = df.rename(columns={symbol: 'Close'})
df = df.fillna(method='ffill')
df = df.fillna(0.0)
return df
def read_strategy_data(data_dir, symbol, dates, strategy):
df = pd.DataFrame(index=dates)
new_df = pd.read_csv(data_dir + symbol + ".HK_" + strategy + ".csv", index_col='Date', parse_dates=True, usecols=['Date', 'Close'], na_values=['nan'])
new_df = new_df.rename(columns={'Close': symbol})
df = df.join(new_df)
# data pre-processing
df = df.rename(columns={symbol: 'Close'})
df = df.fillna(method='ffill')
df = df.fillna(0.0)
return df
# create train, test data given stock data and sequence length
def load_data(data_raw, look_back):
data = []
# create all possible sequences of length seq_len
for index in range(len(data_raw) - look_back):
data.append(data_raw[index: index + look_back])
data = np.array(data)
test_set_size = int(np.round(0.2*data.shape[0]))
train_set_size = data.shape[0] - (test_set_size)
x_train = data[:train_set_size,:-1,:]
y_train = data[:train_set_size,-1,:]
    x_test = data[train_set_size:,:-1,:]
y_test = data[train_set_size:,-1,:]
return [x_train, y_train, x_test, y_test]
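# Illustrative sketch (added for clarity; not part of the original script): shows the
# shapes load_data() produces for a toy single-feature series of length 100 with look_back=5.
def _demo_load_data():
    toy_prices = np.arange(100, dtype=float).reshape(-1, 1)       # shape (100, 1)
    x_tr, y_tr, x_te, y_te = load_data(toy_prices, look_back=5)
    # 95 sliding windows of length 5 -> 76 train / 19 test after the 80/20 split;
    # each x window drops the final step, which becomes the corresponding y target.
    assert x_tr.shape == (76, 4, 1) and y_tr.shape == (76, 1)
    assert x_te.shape == (19, 4, 1) and y_te.shape == (19, 1)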
def load_test_data(df):
# pre-processing
df = df.fillna(method='ffill')
values = df.values
# print(values)
# ensure all data is float
values = values.astype('float32')
# normalise features
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled = scaler.fit_transform(values)
return df, scaled, scaler
# return dataframe with stock tick and sentiment scores
def merge_data(ticker, data_dir, sentiment_data_dir, strategy, start_date=None, end_date=None):
merge_path = os.path.join(data_dir,ticker.zfill(4) + '.HK_' + strategy + '.csv')
sentiment_path = os.path.join(sentiment_data_dir,'data-' + ticker.zfill(5) + '-result.csv')
sentiment_df = pd.read_csv(sentiment_path, index_col='dates',parse_dates=['dates'], na_values=['nan'])
if (strategy == 'all'):
merge_df = pd.read_csv(merge_path,index_col='Date',usecols=['Date',"oscillator_signal","rsi_signal","williams_R_signal","macd_signal",'GDP','Unemployment rate','Property price','Close'],parse_dates=['Date'], na_values=['nan'])
    elif (strategy == 'macd-crossover'):
        merge_df = pd.read_csv(merge_path,index_col='Date', usecols=['Date','signal','GDP','Unemployment rate','Property price','Close'], parse_dates=['Date'], na_values=['nan'])
        merge_df = merge_df.rename(columns={'signal': 'technical_signal'})
    else:
        raise ValueError("Unsupported strategy: {0}".format(strategy))
df = pd.merge(merge_df, sentiment_df, how='inner', left_index=True, right_index=True)
    if (start_date is not None) and (end_date is not None):
df = df.loc[pd.Timestamp(start_date):pd.Timestamp(end_date)]
# pre-processing
df = df.fillna(method='ffill')
values = df.values
# ensure all data is float
values = values.astype('float32')
# normalise features
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled = scaler.fit_transform(values)
return df, scaled, scaler
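# Illustrative merge_data() call (hypothetical ticker and directory paths; requires the
# strategy CSV and sentiment CSV described above to exist on disk):
#   df, scaled, scaler = merge_data('0005', 'data/strategies/', 'data/sentiment/',
#                                   'macd-crossover', start_date='2018-01-01',
#                                   end_date='2018-12-31')
#   'scaled' is the MinMax-scaled feature matrix; 'scaler' can invert the scaling later.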
# return dataframe with stock tick and sentiment scores
def merge_data_daily(ticker, data_dir, sentiment_data_dir, strategy, start_date=None, end_date=None):
merge_path = os.path.join(data_dir,ticker.zfill(4)+'.HK_' + strategy + '.csv')
sentiment_path = os.path.join(sentiment_data_dir,'data-'+ticker.zfill(5)+'-result.csv')
sentiment_df = | pd.read_csv(sentiment_path,index_col='dates',parse_dates=['dates'], na_values=['nan']) | pandas.read_csv |
'''
The 'load' module provides common access to the 'sim' and 'hsr' modules, including
automated batch routines and plotting. All parsing logic for SIM files/reports should
live in 'sim.py' or 'hsr.py'; this module is mainly an API for script-running.
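
Example (illustrative only -- the import path and SIM file location below are
assumptions and will vary by project):

    from eqparse import LoadSim   # package import path may differ per install

    run = LoadSim(r'C:/sims/Prop')                     # 'Prop', 'Prop.SIM' or 'Prop.hsr' all work
    results = run.annual_summaries(writecsv=False)     # BEPS, BEPU (and COST when rates are defined)
    tidy = run.tidy_enduses()                          # dict of tidy frames keyed 'beps'/'bepu'(/'cost')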
'''
import os
import xlwings as xw
import shutil
import pandas as pd
from .sim.sim import RptHandler
from .sim import batch
from .hsr.hsr import hsr_df as hsr_df
from .hsr.unit_dict import unit_dict
#from .inp import inp
from .spaceloads.spaceloads import spaceloads
class LoadSim:
'''
Main entry-point for eqparse module.
Can pass in sim file or hourly file or just general extensionless name, e.g.
"Prop.SIM", "Prop.hsr", "Prop" will all work.
'''
def __init__(self, file, hsr=False, inpfile=None):
file = file.replace('.SIM', '').replace('.hsr', '')
self.fname = file.split("\\")[-1].split("/")[-1]
self.sim = RptHandler(file + '.SIM')
self.path = os.path.dirname(file)
if hsr:
try:
self.hsr = hsr_df(file + '.hsr')
except:
print(
"HSR validation failed. Check eQUEST component names for commas or other special characters.")
if inpfile is not None:
self.inpfile = inpfile
# doesn't work, needs further parsing.
# spcloadfile = '-'.join(file.split('-')[:-1]) + '- SpaceLoads.csv'
# try:
# self.spaceloads = spaceloads(spcloadfile)
# except:
# self.spaceloads = 'Not Loaded: {0}'.format(spcloadfile)
def tidy_enduses(self, dropzeros=True, includetotal=False, rename=True, splitmeters=False):
'''
        Returns a dict of tidy DataFrames keyed 'beps', 'bepu' and (when utility rates are
        defined) 'cost', each indexed by concatenated 'enduse - meter' labels with a value column.
'''
try:
beps, bepu, cost = self.annual_summaries(writecsv=False)
iscost = True
except:
beps, bepu = self.annual_summaries(writecsv=False)
iscost = False
if iscost:
costmelt = pd.melt(
cost, id_vars=['File', 'Rate', 'Meter', 'Utility'])
costmelt['Enduse'] = costmelt['variable'] + \
' - ' + costmelt['Meter']
bepsmelt = pd.melt(beps, id_vars=['File', 'Meter', 'Utility', 'Value'])
bepsmelt['Enduse'] = bepsmelt['variable'] + ' - ' + bepsmelt['Meter']
bepumelt = pd.melt(bepu, id_vars=['File', 'Meter', 'Utility', 'Value'])
bepumelt['Enduse'] = bepumelt['variable'] + ' - ' + bepumelt['Meter']
if dropzeros:
if iscost:
costmelt = costmelt[costmelt.value != 0]
bepumelt = bepumelt[bepumelt.value != 0]
bepsmelt = bepsmelt[bepsmelt.value != 0]
if not includetotal:
if iscost:
costmelt = costmelt[costmelt.variable != 'Total']
bepumelt = bepumelt[bepumelt.variable != 'Total']
bepsmelt = bepsmelt[bepsmelt.variable != 'Total']
if iscost:
costmelt = costmelt[['Enduse', 'value']
].set_index('Enduse', drop=True)
bepumelt = bepumelt[['Enduse', 'value']].set_index('Enduse', drop=True)
bepsmelt = bepsmelt[['Enduse', 'value']].set_index('Enduse', drop=True)
if rename:
if iscost:
costmelt.columns = [self.fname + ' - COST']
bepumelt.columns = [self.fname + ' - BEPU']
bepsmelt.columns = [self.fname + ' - BEPS']
        if splitmeters:
            bepumelt['Enduse'] = bepumelt.apply(
                lambda x: x.name.split(' - ')[0], axis=1)
            bepumelt['Meter'] = bepumelt.apply(
                lambda x: x.name.split(' - ')[1], axis=1)
            bepsmelt['Enduse'] = bepsmelt.apply(
                lambda x: x.name.split(' - ')[0], axis=1)
            bepsmelt['Meter'] = bepsmelt.apply(
                lambda x: x.name.split(' - ')[1], axis=1)
            bepsmelt = bepsmelt[list(
                bepsmelt.columns[-2:]) + list(bepsmelt.columns[:-2])]
            bepumelt = bepumelt[list(
                bepumelt.columns[-2:]) + list(bepumelt.columns[:-2])]
            # Only touch the cost frame when rates exist; otherwise costmelt is undefined
            if iscost:
                costmelt['Enduse'] = costmelt.apply(
                    lambda x: x.name.split(' - ')[0], axis=1)
                costmelt['Meter'] = costmelt.apply(
                    lambda x: x.name.split(' - ')[1], axis=1)
                costmelt = costmelt[list(
                    costmelt.columns[-2:]) + list(costmelt.columns[:-2])]
if iscost:
return {
'beps': bepsmelt,
'bepu': bepumelt,
'cost': costmelt
}
else:
return {
'beps': bepsmelt,
'bepu': bepumelt,
}
def annual_summaries(self, writecsv=True, opencsv=True):
'''
        Exports the following:
            fname_BEPS.csv,
            fname_BEPU.csv,
            fname_COST.csv,
            fname_UNMET.csv
        When writecsv is False, returns the BEPS and BEPU DataFrames
        (plus COST, when utility rates are defined) instead of writing files.
        Available Kwargs:
            writecsv: Bool
            opencsv: Bool
'''
beps = self.sim.beps()
bepu = self.sim.bepu()
iscost = True
try:
cost = self.annual_cost_enduse()
except:
iscost = False
print(
'Rates have not been defined for this project; cost outputs will not be created.')
unmet_df, cool_ssr, heat_ssr = self.sim.unmet()
if writecsv:
beps_file = self.path + "/" + "__BEPS_"+self.fname+".csv"
bepu_file = self.path + "/" + "__BEPU_"+self.fname+".csv"
unmet_file = self.path + "/" + "__UNMET_"+self.fname+".csv"
beps.to_csv(beps_file, index=False)
bepu.to_csv(bepu_file, index=False)
if iscost:
cost_file = self.path + "/" + "__COST_"+self.fname+".csv"
cost.to_csv(cost_file, index=False)
# UNMET CONCAT
with open(unmet_file, 'w', newline='\n') as f:
unmet_df.to_csv(f)
with open(unmet_file, 'a', newline='\n') as f:
heat_ssr.to_csv(f)
cool_ssr.to_csv(f)
if opencsv:
book = xw.Book(beps_file)
book.close()
book = xw.Book(bepu_file)
book.close()
if iscost:
book = xw.Book(cost_file)
book.close()
book = xw.Book(unmet_file)
book.close()
else:
if iscost:
return beps, bepu, cost
else:
return beps, bepu
def hourly(self):
return self.hsr.df
def hourlyreports(self):
return self.hsr.df.columns
def hourly_results(self):
self.hsr.df.to_csv(self.path + "/" + self.fname + ".csv")
def leed_enduses(self, write_csv=True, open_csv=True, group_meters=True):
leed_psf = self.sim.psf(leedpivot=True)
if group_meters:
leed_psf = leed_psf.T.reset_index().groupby(
['Object', 'level_0']).sum().T.stack()
leed_psf.index.names = ['Enduse', 'Energy or Demand']
colnames = leed_psf.columns.tolist()
leed_psf = pd.melt(leed_psf.reset_index(), id_vars=[
'Enduse', 'Energy or Demand'], value_vars=colnames)
if write_csv:
fname = self.path + "/" + "__LEED_ENDUSES_"+self.fname+".csv"
leed_psf.to_csv(fname)
if open_csv:
book = xw.Book(fname)
book.close()
return leed_psf
def sim_print(self, reportlist, directory="Report Outputs"):
'''
        For printing SIM files (i.e. for code/LEED submission) to PDF: writes a
        new *.SIM containing only the reports in reportlist (e.g. ['ES-D', 'BEPS', 'BEPU'])
'''
simpath = self.path + '/' + self.fname + '.SIM'
fdir = self.path + '/' + directory
fname = '_outputs_' + self.fname + '.SIM'
fpath = fdir + '/' + fname
if not os.path.exists(fdir):
os.makedirs(fdir)
if os.path.isfile(fpath):
os.remove(fpath)
with open(simpath) as f:
f_list = f.readlines()
rptstart = []
for num, line in enumerate(f_list, 0):
for r in reportlist:
if r == 'PV-A':
parse_mod = True
else:
parse_mod = False
if r in line:
if parse_mod:
rptstart.append(int(num)-2)
else:
rptstart.append(int(num)-2)
for r in rptstart:
lines = 0
scan = f_list[r+3:(r+1000)]
if lines == 0:
for num, line in enumerate(scan):
rptlen = []
if "REPORT" in line:
rptlen.append(num)
lines = lines + 1
break
rpt_text_list = (f_list[r:(r+rptlen[0]+1)])
if 'PV-A' in rpt_text_list[2] or 'PS-E' in rpt_text_list[2] or 'PS-F' in rpt_text_list[2]:
rpt_text_list[-1] = rpt_text_list[-1][:-2]
with open(fpath, 'a') as output:
for l in rpt_text_list:
output.write(l)
print(
'Successfully Printed Requested Reports to {0}'.format((fpath)))
def annual_cost_enduse(self):
def get_utils(series):
utilcols = []
for s in series:
if "ELECTRICITY" in s:
utilcols.append('Electricity')
if "NATURAL-GAS" in s:
utilcols.append('Natural-Gas')
if "STEAM" in s:
utilcols.append('Steam')
if "CHILLED" in s:
utilcols.append('Chilled-Water')
return utilcols
bepu = self.sim.bepu()
ese = self.sim.ese()
mdict = {}
rate = list(ese.Object)
meters = list(ese.Meters)
for num, mtrlist in enumerate(meters):
for mtr in mtrlist:
mdict[mtr] = rate[num]
rdict = ese.groupby('Object').sum()[
['TOTAL CHARGE ($)', 'METERED ENERGY (KWH)']]
rdict['vrate'] = rdict['TOTAL CHARGE ($)'] / \
rdict['METERED ENERGY (KWH)']
vrate = rdict['vrate'].to_dict()
metervrate = {}
for key, value in mdict.items():
metervrate[key] = vrate[value]
utils = get_utils(bepu.index)
def try_rate(x):
try:
return mdict[x]
except:
return 0
def try_vrate(rate, metervrate):
try:
return metervrate[rate]
except:
if rate == 0:
return 0
else:
print(
'could not find associated vrate from meter: {0}'.format(rate))
bepu['UTILITY'] = utils
bepu.index = [x.replace(" ELECTRICITY", "").replace(" NATURAL-GAS", "").replace(
" STEAM", "").replace(" CHILLED-WATER", "").strip() for x in bepu.index]
bepu['meter'] = bepu.index
bepu['rate'] = bepu['meter'].apply(lambda x: try_rate(x))
bepu['vrate'] = bepu['rate'].apply(lambda x: try_vrate(x, vrate))
bepu['vrate'] = bepu['vrate'].fillna(0)
try:
cost = bepu[['Lights',
'Task Lights',
'Misc Equip',
'Space Heating',
'Space Cooling',
'Heat Reject',
'Pumps & Aux',
'Vent Fans',
'Refrig Display',
'Ht Pump Supplem',
'Domest Hot Wtr',
'Ext Usage',
'Total']].apply(lambda x: x * bepu['vrate'])
cost['Utility'] = utils
cost['File'] = bepu['File']
cost['Rate'] = bepu['rate']
cost['Meter'] = bepu['meter']
cost = cost[[
'File',
'Rate',
'Meter',
'Utility',
'Lights',
'Task Lights',
'Misc Equip',
'Space Heating',
'Space Cooling',
'Heat Reject',
'Pumps & Aux',
'Vent Fans',
'Refrig Display',
'Ht Pump Supplem',
'Domest Hot Wtr',
'Ext Usage',
'Total'
]]
return cost
except:
            print('COULDN\'T PARSE BEPU AND VRATE. CHECK FOR NAN OR -NAN IN RESULTS')
# print (bepu)
# print (sim)
def systemsummaries(self):
ssl = self.sim.ssl()
ssa = self.sim.ssa()
ssl['Month'] = ssl.index
sslsumm = ssl.copy()
sslsumm['Fan Power (kWh)'] = sslsumm['FAN ELEC DURING HEATING (KWH)'] + sslsumm['FAN ELEC DURING COOLING (KWH)'] + \
sslsumm['FAN ELEC DURING FLOATING (KWH)'] - \
sslsumm['FAN ELEC DURING HEAT & COOL KWH)']
sslsumm = sslsumm[['Month', 'Object', 'File', 'Fan Power (kWh)']]
ssasumm = ssa[[
'Month', 'Cooling Energy (MMBtu)', 'Heating Energy (MMBtu)', 'Object', 'File']]
systemsummaries = ssasumm.merge(sslsumm, how='left', left_on=[
'Month', 'Object'], right_on=['Month', 'Object'])
systemsummaries = systemsummaries[[
'Month', 'Cooling Energy (MMBtu)', 'Heating Energy (MMBtu)', 'Object', 'File_x', 'Fan Power (kWh)'
]]
systemsummaries = systemsummaries[[
'Object',
'Month',
'Cooling Energy (MMBtu)',
'Heating Energy (MMBtu)',
'Fan Power (kWh)',
'File_x'
]]
systemsummaries.columns = [
x.replace("File_x", "File") for x in systemsummaries]
return systemsummaries
def monthly_cost_enduse(self):
# only needed for rate parsing by month. otherwise use monthly_enduses
def monthly_vrate_dict(ese):
month_rate_costs = ese.groupby(['Object', 'Month'], sort=False).sum()[
'VIRTUAL RATE ($/UNIT)'].reset_index()
month_rate_cost_dict = {}
for rate in month_rate_costs['Object'].unique():
month_rate_cost_dict[rate] = {}
for month in month_rate_costs['Month'].unique():
month_rate_cost_dict[rate][month] = month_rate_costs[(month_rate_costs['Object'] == rate) & (
month_rate_costs['Month'] == month)]['VIRTUAL RATE ($/UNIT)'].tolist()[0]
return month_rate_cost_dict
ese = self.sim.ese()
psf = self.sim.psf()
vrate_dict = monthly_vrate_dict(ese)
psf = psf[(psf['Cons_Demand'] == 'Consumption')].groupby(
['Object', 'Month'], sort=False).sum().drop('Total', axis=1).reset_index()
enduses = ['Lights', 'Task Lights',
'Misc Equip', 'Space Heating', 'Space Cooling', 'Heat Reject',
'Pumps & Aux', 'Vent Fans', 'Refrig Display', 'Ht Pump Supplem',
'Domest Hot Wtr', 'Ext Usage']
mdict = {}
rate = list(ese.Object)
meters = list(ese.Meters)
for num, mtrlist in enumerate(meters):
for mtr in mtrlist:
mdict[mtr] = rate[num]
def try_rate(x):
try:
return mdict[x]
except:
try:
return mdict[x[0:4]]
except:
return 0
psf['rate'] = psf['Object'].apply(lambda x: try_rate(x))
def try_vrate(x, vrate_dict):
month = x['Month']
rate = x['rate']
try:
byrate = vrate_dict[rate]
bymonth = byrate[month]
return bymonth
except:
return 0
psf['vrate'] = psf.apply(lambda x: try_vrate(x, vrate_dict), axis=1)
cost_monthly_enduse = psf.copy()
for col in cost_monthly_enduse.columns:
try:
cost_monthly_enduse[col] = cost_monthly_enduse[col].astype(
float) * cost_monthly_enduse['vrate'].astype(float)
except:
pass
dflist = []
for meter in cost_monthly_enduse['Object'].unique():
mtrdf = cost_monthly_enduse[cost_monthly_enduse['Object'] == meter]
for use in enduses:
if mtrdf[use].sum() != 0:
series = mtrdf[use]
series.index = mtrdf.Month
series.name = series.name + '-' + meter
dflist.append(mtrdf[use])
cost_df = | pd.DataFrame(dflist) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 12:05:23 2020
@author: haukeh
"""
# import tkinter as tk
# from tkinter import filedialog
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# root = tk.Tk()
# root.withdraw()
# file_path = filedialog.askopenfilename()
df_eg = pd.read_pickle('data/OSeMBE_ProductionByTechnologyAnnual_DataV3_2020-02-26.pkl')
pathways_eg = df_eg.loc[:,'pathway'].unique()
df_eg['region'] = df_eg['info_1'].apply(lambda x: x[:2])
regions_eg = np.sort(df_eg.loc[:,'region'].unique())
df_ate = pd.read_pickle('data/OSeMBE_AnnualTechnologyEmission_DataV2_2020-02-14.pkl')
df_c2t = df_ate[df_ate['info_2']=='CO2'].copy()
pathways_c2t = df_c2t.loc[:,'pathway'].unique()
df_c2t['region'] = df_c2t['info_1'].apply(lambda x: x[:2])
df_c2t['import/domestic'] = df_c2t['info_1'].apply(lambda x: x[6])
df_c2t['fuel_source'] = df_c2t['info_1'].apply(lambda x: x[2:4]+x[6])
df_c2t = df_c2t[(df_c2t['import/domestic']=='I') | (df_c2t['import/domestic']=='X')]
df_c2t['unit'] = 'kt'
regions_c2t = df_c2t['region'].unique()
#%% Dictionary with standard dES colour codes
colours = dict(
coal = 'rgb(0, 0, 0)',
oil = 'rgb(121, 43, 41)',
gas = 'rgb(86, 108, 140)',
nuclear = 'rgb(186, 28, 175)',
waste = 'rgb(138, 171, 71)',
biomass = 'rgb(172, 199, 119)',
biofuel = 'rgb(79, 98, 40)',
hydro = 'rgb(0, 139, 188)',
wind = 'rgb(143, 119, 173)',
solar = 'rgb(230, 175, 0)',
geo = 'rgb(192, 80, 77)',
ocean ='rgb(22, 54, 92)')
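# Each Dash callback below maps a record's human-readable fuel name (e.g. 'Wind') to one of
# the short keys above via a small fuel_short lookup table, then uses that key to pick the
# trace colour, e.g. colours['wind'] -> 'rgb(143, 119, 173)'.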
#%% dash app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='OSeMBE results'),
html.H2(children='Power generation'),
html.Div([
html.Label('Electricity generation - Pathway 1'),
dcc.Dropdown(
id='pg-pathway-selection-1',
options = [{'label': i, 'value': i} for i in pathways_eg],
value = 'B1C0T0E0'
),
html.Label('Electricity generation - Region/Country 1'),
dcc.Dropdown(
id='pg-region-country-selection-1',
options = [{'label': i, 'value': i} for i in regions_eg],
value = 'EU+CH+NO'
),
dcc.Graph(
id='Power-generation-1'
)
], style={'width': '49%', 'display': 'inline-block'}
),
html.Div([
html.Label('Electricity generation - Pathway 2'),
dcc.Dropdown(
id='pg-pathway-selection-2',
options = [{'label': i, 'value': i} for i in pathways_eg],
value = 'B1C0T0E0'
),
html.Label('Electricity generation - Region/Country 2'),
dcc.Dropdown(
id='pg-region-country-selection-2',
options = [{'label': i, 'value': i} for i in regions_eg],
value = 'EU+CH+NO'
),
dcc.Graph(
id='Power-generation-2'
)
],style = {'width': '49%', 'display': 'inline-block', 'float': 'right'}
),
html.H2(children='Annual CO2 Emission by Technology'),
html.Div([
html.Label('CO2 Emission - Pathway 1'),
dcc.Dropdown(
id='c2t-pathway-selection-1',
options = [{'label': i, 'value': i} for i in pathways_c2t],
value = 'B1C0T0E0'
),
html.Label('CO2 Emission - Country 1'),
dcc.Dropdown(
id='c2t-country-selection-1',
options = [{'label': i, 'value': i} for i in regions_c2t],
value = 'AT'
),
dcc.Graph(
id='c2t-graph-1'
)
], style={'width': '49%', 'display': 'inline-block'}
),
html.Div([
html.Label('CO2 Emission - Pathway 2'),
dcc.Dropdown(
id='c2t-pathway-selection-2',
options = [{'label': i, 'value': i} for i in pathways_c2t],
value = 'B1C0T0E0'
),
html.Label('CO2 Emission - Country 2'),
dcc.Dropdown(
id='c2t-country-selection-2',
options = [{'label': i, 'value': i} for i in regions_c2t],
value = 'AT'
),
dcc.Graph(
id='c2t-graph-2'
)
], style={'width': '49%', 'display': 'inline-block'}
)
])
# app.css.append_css({
# 'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'
# })
@app.callback(
Output('Power-generation-1', 'figure'),
[Input('pg-pathway-selection-1', 'value'),
Input('pg-region-country-selection-1', 'value')])
#%% Function for updating graph
def update_graph_1(selected_pathway, selected_region):
filtered_df = df_eg[(df_eg['pathway'] == selected_pathway) & (df_eg['region'] == selected_region)]
filtered_df_p = filtered_df.pivot(index='year', columns='indicator', values='value')
years = filtered_df['year'].unique()
traces = []
fuel_short = pd.DataFrame({'fuel_name':['Wind','Hydro','Biofuel liquid','Coal','Biomass solid','Waste non renewable','Oil','Nuclear','Natural gas / non renew.','Ocean','Geothermal','Solar'],'fuel_abr':['wind','hydro','biofuel','coal','biomass','waste','oil','nuclear','gas','ocean','geo','solar']}, columns = ['fuel_name','fuel_abr'])
#%% Facts dict
info_dict = {}
info_dict['Filename'] = ['{}_OSeMBE_plot_generation' .format(pd.to_datetime('today').strftime("%Y-%m-%d"))]
info_dict['Unit'] = filtered_df.loc[:,'unit'].unique()
info_dict['Pathway'] = filtered_df.loc[:,'pathway'].unique()
info_dict['Year'] = filtered_df.loc[:,'year'].unique().tolist()
info_dict['Y-Axis'] = ['{}'.format(*info_dict['Unit'])]
fuels = np.sort(filtered_df['indicator'].unique())
for i in fuels:
temp = fuel_short.loc[fuel_short['fuel_name']==i,'fuel_abr']
fuel_code = temp.iloc[0]
traces.append(dict(
x = years,
y = filtered_df_p.loc[:,i],
hoverinfo='x+y',
mode='lines',
line=dict(width=0.5,
color=colours[fuel_code]),
stackgroup='one',
name=i
))
return {
'data': traces,
'layout': dict(
title='Electricity generation in {} in scenario {}'.format(selected_region,selected_pathway),
yaxis=dict(title=''.join(info_dict['Y-Axis'])),
font=dict(family='Aleo'),
)
}
@app.callback(
Output('Power-generation-2', 'figure'),
[Input('pg-pathway-selection-2', 'value'),
Input('pg-region-country-selection-2', 'value')])
#%% Function for updating graph
def update_graph_2(selected_pathway, selected_region):
filtered_df = df_eg[(df_eg['pathway'] == selected_pathway) & (df_eg['region'] == selected_region)]
filtered_df_p = filtered_df.pivot(index='year', columns='indicator', values='value')
years = filtered_df['year'].unique()
traces = []
fuel_short = pd.DataFrame({'fuel_name':['Wind','Hydro','Biofuel liquid','Coal','Biomass solid','Waste non renewable','Oil','Nuclear','Natural gas / non renew.','Ocean','Geothermal','Solar'],'fuel_abr':['wind','hydro','biofuel','coal','biomass','waste','oil','nuclear','gas','ocean','geo','solar']}, columns = ['fuel_name','fuel_abr'])
#%% Facts dict
info_dict = {}
info_dict['Filename'] = ['{}_OSeMBE_plot_generation' .format(pd.to_datetime('today').strftime("%Y-%m-%d"))]
info_dict['Unit'] = filtered_df.loc[:,'unit'].unique()
info_dict['Pathway'] = filtered_df.loc[:,'pathway'].unique()
info_dict['Year'] = filtered_df.loc[:,'year'].unique().tolist()
info_dict['Y-Axis'] = ['{}'.format(*info_dict['Unit'])]
fuels = np.sort(filtered_df['indicator'].unique())
for i in fuels:
temp = fuel_short.loc[fuel_short['fuel_name']==i,'fuel_abr']
fuel_code = temp.iloc[0]
traces.append(dict(
x = years,
y = filtered_df_p.loc[:,i],
hoverinfo='x+y',
mode='lines',
line=dict(width=0.5,
color=colours[fuel_code]),
stackgroup='one',
name=i
))
return {
'data': traces,
'layout': dict(
title='Electricity generation in {} in scenario {}'.format(selected_region,selected_pathway),
yaxis=dict(title=''.join(info_dict['Y-Axis'])),
font=dict(family='Aleo'),
)
}
@app.callback(
Output('c2t-graph-1', 'figure'),
[Input('c2t-pathway-selection-1', 'value'),
Input('c2t-country-selection-1', 'value')])
#%% Function for updating graph
def update_graph_3(selected_pathway, selected_region):
# selected_pathway = 'B0C0T0E0'
# selected_region = 'DE'
filtered_df = df_c2t[(df_c2t['pathway'] == selected_pathway) & (df_c2t['region'] == selected_region)]
filtered_df_p = filtered_df.pivot(index='year', columns='fuel_source', values='value')
years = filtered_df['year'].unique()
traces = []
fuel_short = pd.DataFrame({'fuel_name':['BFI','BFX','BMI','BMX','COI','COX','GOX','HFI','NGI','NGX','OII','OIX','URI','WSX'],'fuel_abr':['biofuel','biofuel','biomass','biomass','coal','coal','geo','oil','gas','gas','oil','oil','nuclear','waste']}, columns = ['fuel_name','fuel_abr'])
#%% Facts dict
info_dict = {}
info_dict['Filename'] = ['{}_OSeMBE_plot_emission' .format(pd.to_datetime('today').strftime("%Y-%m-%d"))]
info_dict['Unit'] = filtered_df.loc[:,'unit'].unique()
info_dict['Pathway'] = filtered_df.loc[:,'pathway'].unique()
info_dict['Year'] = filtered_df.loc[:,'year'].unique().tolist()
info_dict['Y-Axis'] = ['{}'.format(*info_dict['Unit'])]
fuels = np.sort(filtered_df['fuel_source'].unique())
for i in fuels:
temp = fuel_short.loc[fuel_short['fuel_name']==i,'fuel_abr']
fuel_code = temp.iloc[0]
traces.append(dict(
x = years,
y = filtered_df_p.loc[:,i],
hoverinfo='x+y',
mode='lines',
line=dict(width=0.5,
color=colours[fuel_code]),
stackgroup='one',
name=i
))
return {
'data': traces,
'layout': dict(
title='CO2 Emissions in {} in scenario {}'.format(selected_region,selected_pathway),
yaxis=dict(title=''.join(info_dict['Y-Axis'])),
font=dict(family='Aleo'),
)
}
@app.callback(
Output('c2t-graph-2', 'figure'),
[Input('c2t-pathway-selection-2', 'value'),
Input('c2t-country-selection-2', 'value')])
#%% Function for updating graph
def update_graph_4(selected_pathway, selected_region):
# selected_pathway = 'B0C0T0E0'
# selected_region = 'DE'
filtered_df = df_c2t[(df_c2t['pathway'] == selected_pathway) & (df_c2t['region'] == selected_region)]
filtered_df_p = filtered_df.pivot(index='year', columns='fuel_source', values='value')
years = filtered_df['year'].unique()
traces = []
fuel_short = | pd.DataFrame({'fuel_name':['BFI','BFX','BMI','BMX','COI','COX','GOX','HFI','NGI','NGX','OII','OIX','URI','WSX'],'fuel_abr':['biofuel','biofuel','biomass','biomass','coal','coal','geo','oil','gas','gas','oil','oil','nuclear','waste']}, columns = ['fuel_name','fuel_abr']) | pandas.DataFrame |
#! /usr/bin/env python3.6
'''
Author : Coslate
Date : 2018/07/07
Description :
    This program examines whether each job number appears in the input Excel
    file and concatenates the result into the total-check Excel output. It also
    highlights job numbers that appear in more than one input Excel file.
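
Usage (illustrative; the script and file names below are placeholders):
    python check_job_number.py --file_name_path ./dept_A.xlsx \
        --org_checked_file ./total_check.xlsx --total_num 500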
'''
import argparse
import numpy as np
import pandas as pd
from pandas import ExcelWriter
import ntpath
import shutil
import os
import sys
import datetime
#########################
# Main-Routine #
#########################
def main():
(file_name_path, total_num, org_checked_file) = ArgumentParser()
#Get the input excel file data
file_name = ntpath.basename(file_name_path)
file_data = pd.read_excel(file_name_path)
original_check, total_num_origin = ProcessOrigCheckFile(org_checked_file, total_num)
#Build the new dataframe
total_check = original_check.copy()
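    # Column names are Chinese: "工號" = job/employee number, "做過健檢" = has completed a health check.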
check_num_list = file_data["工號"].values.tolist()
total_check[file_name] = [0 for x in range(0, total_num_origin)]
columns = total_check.columns.tolist()
for check_num in range(0, total_num):
total_check['工號'][check_num] = check_num
if(check_num > (total_num_origin-1)):
break
elif(check_num in check_num_list):
total_check['做過健檢'][check_num] = 1
total_check[file_name][check_num] = 1
total_check = total_check.drop(total_check.index[total_num:total_num_origin:1])
if(total_num > total_num_origin):
for check_num in range(total_num_origin, total_num):
total_check.loc[check_num] = [0 for x in range(len(columns))]
total_check['工號'][check_num] = check_num
#Write out to the new excel(total checked excel)
writer = ExcelWriter(org_checked_file)
total_check.to_excel(writer,'Checkup',index=False)
workbook = writer.book
worksheet = writer.sheets['Checkup']
HightLightRepeat(total_check, workbook, worksheet, columns)
writer.save()
#########################
# Sub-Routine #
#########################
def ArgumentParser():
total_num = 0
file_name_path = ""
org_checked_file = ""
parser = argparse.ArgumentParser()
parser.add_argument("--file_name_path" , "-file_to_check" , help="The name of the input excel to do the examining.")
parser.add_argument("--org_checked_file" , "-org_checked_file" , help="The name of the original checked file.")
parser.add_argument("--total_num" , "-total_job_number" , help="The maximum number of the job number.")
args = parser.parse_args()
if args.file_name_path:
file_name_path = args.file_name_path
if args.org_checked_file:
org_checked_file = args.org_checked_file
if args.total_num:
total_num = int(args.total_num)
return (file_name_path, total_num, org_checked_file)
def HightLightRepeat(in_data_frame, workbook, worksheet, columns):
positiveFormat = workbook.add_format({
'bold': 'true',
'font_color': 'red'
})
columns_num = len(columns)
for index, row in in_data_frame.iterrows():
repeat_num = 0
for x in range(2, columns_num):
if(row[columns[x]] == 1):
repeat_num += 1
if(repeat_num > 1):
worksheet.set_row(index+1, 15, positiveFormat)
def ProcessOrigCheckFile(org_checked_file, total_num):
if os.path.exists(org_checked_file):
#Get the original total checked excel
original_check = | pd.read_excel(org_checked_file) | pandas.read_excel |
from sklearn.svm import SVR
from sklearn.dummy import DummyRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn import model_selection
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
PATH_AS_RELATIONSHIPS = '../../Datasets/AS-relationships/20210701.as-rel2.txt'
DEEPWALK_EMBEDDINGS_128 = '../Embeddings/DeepWalk_128.csv'
DIFF2VEC_EMBEDDINGS_128 = '../Embeddings/Diff2Vec_128.csv'
NETMF_EMBEDDINGS_128 = '../Embeddings/NetMF_128.csv'
NODESKETCH_EMBEDDINGS_128 = '../Embeddings/NodeSketch_128.csv'
WALKLETS_EMBEDDINGS_256 = '../Embeddings/Walklets_256.csv'
NODE2VEC_EMBEDDINGS_64 = '../Embeddings/Node2Vec_embeddings.emb'
NODE2VEC_LOCAL_EMBEDDINGS_64 = '../Embeddings/Node2Vec_p2_64.csv'
NODE2VEC_GLOBAL_EMBEDDINGS_64 = '../Embeddings/Node2Vec_q2_64.csv'
DIFF2VEC_EMBEDDINGS_64 = '../Embeddings/Diff2Vec_64.csv'
NETMF_EMBEDDINGS_64 = '../Embeddings/NetMF_64.csv'
NODESKETCH_EMBEDDINGS_64 = '../Embeddings/NodeSketch_64.csv'
WALKLETS_EMBEDDINGS_128 = '../Embeddings/Walklets_128.csv'
NODE2VEC_WL5_E3_LOCAL = '../Embeddings/Node2Vec_64_wl5_ws2_ep3_local.csv'
NODE2VEC_WL5_E3_GLOBAL = '../Embeddings/Node2Vec_64_wl5_ws2_ep3_global.csv'
NODE2VEC_64_WL5_E1_GLOBAL = '../Embeddings/Node2Vec_64_wl5_ws2_global.csv'
BGP2VEC_64 = '../Embeddings/Node2Vec_bgp2Vec.csv'
NODE2VEC_32_WL6_WN40_EP3 = '../Embeddings/Node2Vec_32_wl6_ws5_ep3_wn40_p2_q05.csv'
BGP2VEC_32 = '../Embeddings/BGP2VEC_32'
BGP2VEC_32_WS5 = '../Embeddings/BGP2Vec_32_wl6_ws5_ep3_wn40_p4_q05.csv'
karate_club_emb_64 = ['Diff2Vec', 'NetMF', 'NodeSketch', 'Walklets', 'Node2Vec_Local', 'Node2Vec_Global', 'Node2Vec_wl5_global', 'Node2Vec_wl5_e3_global', 'Node2Vec_wl5_e3_local', 'bgp2vec_64', 'bgp2vec_32', 'bgp2vec_32_ws5', 'node2vec_32_wl6_wn40_e3']
karate_club_emb_128 = ['Diff2Vec', 'NetMF', 'NodeSketch', 'Walklets', 'DeepWalk']
graph_emb_dimensions = 64
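# The lists above enumerate the embedding names understood by read_karateClub_embeddings_file() below.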
def read_karateClub_embeddings_file(emb, dimensions):
"""
    The karateclub library requires nodes to be labelled with consecutive integers and returns
    the embeddings in ascending node order, so this function reassigns each ASN to its own embedding.
:param emb: A dataset containing pretrained embeddings
:param dimensions: The dimensions of the given dataset
:return: A dataframe containing pretrained embeddings
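
    Example (illustrative; assumes the corresponding embeddings CSV exists on disk):
        df = read_karateClub_embeddings_file('NetMF', dimensions=64)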
"""
if dimensions == 64:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_64, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_64, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_64, sep=',')
elif emb == 'Walklets':
df = pd.read_csv(WALKLETS_EMBEDDINGS_128, sep=',')
elif emb == 'Node2Vec_Local':
df = pd.read_csv(NODE2VEC_LOCAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_Global':
df = pd.read_csv(NODE2VEC_GLOBAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_wl5_global':
df = pd.read_csv(NODE2VEC_64_WL5_E1_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_global':
df = pd.read_csv(NODE2VEC_WL5_E3_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_local':
df = pd.read_csv(NODE2VEC_WL5_E3_LOCAL, sep=',')
elif emb == 'bgp2vec_64':
df = pd.read_csv(BGP2VEC_64, sep=',')
elif emb == 'bgp2vec_32':
df = pd.read_csv(BGP2VEC_32, sep=',')
elif emb == 'bgp2vec_32_ws5':
df = pd.read_csv(BGP2VEC_32_WS5, sep=',')
elif emb == 'node2vec_32_wl6_wn40_e3':
            df = pd.read_csv(NODE2VEC_32_WL6_WN40_EP3, sep=',')
else:
raise Exception('Not defined dataset')
else:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_128, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_128, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_128, sep=',')
elif emb == 'Walklets':
df = | pd.read_csv(WALKLETS_EMBEDDINGS_256, sep=',') | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
(
["a"],
["b"],
1,
DataFrame(
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected
):
result = self.df1.melt(id_vars, value_vars, col_level=col_level)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
def test_ignore_multiindex(self):
# GH 17440
index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
)
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
)
expected = DataFrame(
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_ignore_index_name_and_type(self):
# GH 17440
index = pd.Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_melt_with_duplicate_columns(self):
# GH#41951
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
result = df.melt(id_vars=["a"], value_vars=["b"])
expected = DataFrame(
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
)
tm.assert_frame_equal(result, expected)
class TestLreshape:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
}
df = DataFrame(data)
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
result = lreshape(df, spec)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
1454,
3139,
4133,
1766,
3139,
4133,
],
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
1892.0,
3338.0,
4575.0,
2293.0,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
],
"id": [
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
np.nan,
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
np.nan,
np.nan,
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
np.nan,
1892.0,
3338.0,
4575.0,
2293.0,
np.nan,
np.nan,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = lreshape(df, spec, dropna=False, label="foo")
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
lreshape(df, spec)
class TestWideToLong:
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A1970": {0: "a", 1: "b", 2: "c"},
"A1980": {0: "d", 1: "e", 2: "f"},
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204 wide_to_long call should not modify 'stubs' list
df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ["id", "inc1", "inc2", "edu1", "edu2"]
stubs = ["inc", "edu"]
wide_to_long(df, stubs, i="id", j="age")
assert stubs == ["inc", "edu"]
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A.1970": {0: "a", 1: "b", 2: "c"},
"A.1980": {0: "d", 1: "e", 2: "f"},
"B.1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B.1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A(quarterly)1970": {0: "a", 1: "b", 2: "c"},
"A(quarterly)1980": {0: "d", 1: "e", 2: "f"},
"B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A(quarterly)": ["a", "b", "c", "d", "e", "f"],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[
["X", "A(quarterly)", "B(quarterly)"]
]
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_unbalanced(self):
# test that we can have a varying amount of time variables
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": ["X1", "X2", "X1", "X2"],
"A": [1.0, 2.0, 3.0, 4.0],
"B": [5.0, 6.0, np.nan, np.nan],
"id": [0, 1, 0, 1],
"year": [2010, 2010, 2011, 2011],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"BBBX": [91, 92, 93],
"BBBZ": [91, 92, 93],
}
)
df["id"] = df.index
expected = DataFrame(
{
"BBBX": [91, 92, 93, 91, 92, 93],
"BBBZ": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_separator(self):
# if an invalid separator is supplied a empty data frame is returned
sep = "nope!"
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"A2010": [],
"A2011": [],
"B2010": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])[
["X", "A2010", "A2011", "B2010", "A", "B"]
]
expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep)
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"Arating": [91, 92, 93],
"Arating_old": [91, 92, 93],
}
)
df["id"] = df.index
expected = DataFrame(
{
"Arating": [91, 92, 93, 91, 92, 93],
"Arating_old": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[
["Arating", "Arating_old", "A", "B", "BB"]
]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_suffixtype(self):
# If all stubs names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = DataFrame(
{
"Aone": [1.0, 2.0],
"Atwo": [3.0, 4.0],
"Bone": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"Aone": [],
"Atwo": [],
"Bone": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])
expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
expected = DataFrame(
{
"ht": [
2.8,
3.4,
2.9,
3.8,
2.2,
2.9,
2.0,
3.2,
1.8,
2.8,
1.9,
2.4,
2.2,
3.3,
2.3,
3.4,
2.1,
2.9,
],
"famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
"birth": [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
"age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
}
)
expected = expected.set_index(["famid", "birth", "age"])[["ht"]]
result = wide_to_long(df, "ht", i=["famid", "birth"], j="age")
tm.assert_frame_equal(result, expected)
def test_non_unique_idvars(self):
# GH16382
# Raise an error message if non unique id vars (i) are passed
df = DataFrame(
{"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]}
)
msg = "the id variables need to uniquely identify each row"
with pytest.raises(ValueError, match=msg):
wide_to_long(df, ["A_A", "B_B"], i="x", j="colname")
def test_cast_j_int(self):
df = DataFrame(
{
"actor_1": ["CCH Pounder", "<NAME>", "<NAME>"],
"actor_2": ["<NAME>", "<NAME>", "<NAME>"],
"actor_fb_likes_1": [1000.0, 40000.0, 11000.0],
"actor_fb_likes_2": [936.0, 5000.0, 393.0],
"title": ["Avatar", "Pirates of the Caribbean", "Spectre"],
}
)
expected = DataFrame(
{
"actor": [
"CCH Pounder",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0],
"num": [1, 1, 1, 2, 2, 2],
"title": [
"Avatar",
"Pirates of the Caribbean",
"Spectre",
"Avatar",
"Pirates of the Caribbean",
"Spectre",
],
}
).set_index(["title", "num"])
result = wide_to_long(
df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_identical_stubnames(self):
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"A": ["X1", "X2"],
}
)
msg = "stubname can't be identical to a column name"
with pytest.raises(ValueError, match=msg):
wide_to_long(df, ["A", "B"], i="A", j="colname")
def test_nonnumeric_suffix(self):
df = DataFrame(
{
"treatment_placebo": [1.0, 2.0],
"treatment_test": [3.0, 4.0],
"result_placebo": [5.0, 6.0],
"A": ["X1", "X2"],
}
)
expected = DataFrame(
{
"A": ["X1", "X2", "X1", "X2"],
"colname": ["placebo", "placebo", "test", "test"],
"result": [5.0, 6.0, np.nan, np.nan],
"treatment": [1.0, 2.0, 3.0, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_suffix(self):
df = DataFrame(
{
"A": ["X1", "X2"],
"result_1": [0, 9],
"result_foo": [5.0, 6.0],
"treatment_1": [1.0, 2.0],
"treatment_foo": [3.0, 4.0],
}
)
expected = DataFrame(
{
"A": ["X1", "X2", "X1", "X2"],
"colname": ["1", "1", "foo", "foo"],
"result": [0.0, 9.0, 5.0, 6.0],
"treatment": [1.0, 2.0, 3.0, 4.0],
}
).set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_float_suffix(self):
df = DataFrame(
{
"treatment_1.1": [1.0, 2.0],
"treatment_2.1": [3.0, 4.0],
"result_1.2": [5.0, 6.0],
"result_1": [0, 9],
"A": ["X1", "X2"],
}
)
expected = DataFrame(
{
"A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"],
"colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1],
"result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan],
"treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_col_substring_of_stubname(self):
# GH22468
# Don't raise ValueError when a column name is a substring
# of a stubname that's been passed as a string
wide_data = {
"node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4},
"A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81},
"PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6},
"PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67},
"PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67},
}
wide_df = | DataFrame.from_dict(wide_data) | pandas.DataFrame.from_dict |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
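# Hedged example (added for illustration, not part of the original module): a tiny
# self-check showing maybe_convert_platform turning a plain Python list into an
# ndarray with an inferred (non-object) dtype. The exact integer width is platform
# dependent, so only the dtype inference itself is asserted here.
def _example_maybe_convert_platform():
    converted = maybe_convert_platform([1, 2, 3])
    assert isinstance(converted, np.ndarray)
    assert converted.dtype != np.object_
    return converted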
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
    This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
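# Hedged example (added for illustration, not part of the original module): raw
# numpy datetime/timedelta scalars get boxed into pandas Timestamp/Timedelta,
# while an explicit object dtype leaves the value untouched.
def _example_maybe_box_datetimelike():
    boxed_ts = maybe_box_datetimelike(np.datetime64("2021-01-01"))
    boxed_td = maybe_box_datetimelike(np.timedelta64(1, "D"))
    unboxed = maybe_box_datetimelike(np.datetime64("2021-01-01"), dtype=object)
    assert isinstance(boxed_ts, Timestamp)
    assert isinstance(boxed_td, Timedelta)
    assert isinstance(unboxed, np.datetime64)
    return boxed_ts, boxed_td, unboxed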
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
    or could be an astype of float64->float32)
"""
do_round = False
if | is_scalar(result) | pandas.core.dtypes.common.is_scalar |
import argparse
import sys
import time
sys.path.insert(0, 'catboost/catboost/python-package')
import ml_dataset_loader.datasets as data_loader
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.model_selection import train_test_split
# Global parameters
random_seed = 0
max_depth = 6
learning_rate = 0.1
min_split_loss = 0
min_weight = 1
l1_reg = 0
l2_reg = 1
class Data:
def __init__(self, X, y, name, task, metric, train_size=0.6, validation_size=0.2,
test_size=0.2):
assert (train_size + validation_size + test_size) == 1.0
self.name = name
self.task = task
self.metric = metric
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y,
test_size=test_size,
random_state=random_seed)
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train,
self.y_train,
test_size=validation_size / (1 - test_size),
random_state=random_seed)
assert (self.X_train.shape[0] + self.X_val.shape[0] + self.X_test.shape[0]) == X.shape[0]
def eval(data, pred):
if data.metric == "RMSE":
return np.sqrt(mean_squared_error(data.y_test, pred))
elif data.metric == "Accuracy":
# Threshold prediction if binary classification
if data.task == "Classification":
pred = pred > 0.5
elif data.task == "Multiclass classification":
if pred.ndim > 1:
pred = np.argmax(pred, axis=1)
return accuracy_score(data.y_test, pred)
else:
raise ValueError("Unknown metric: " + data.metric)
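# Hedged illustration (hypothetical toy inputs, not part of the benchmark runs):
# eval() thresholds binary-classification scores at 0.5 before computing accuracy,
# so raw probabilities can be passed in directly.
def _example_eval_binary_accuracy():
    from types import SimpleNamespace
    fake_data = SimpleNamespace(task="Classification", metric="Accuracy",
                                y_test=np.array([0, 1, 1, 0]))
    raw_scores = np.array([0.2, 0.9, 0.7, 0.4])
    return eval(fake_data, raw_scores)  # every score falls on the correct side -> 1.0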
def add_data(df, algorithm, data, elapsed, metric):
time_col = (data.name, 'Time(s)')
metric_col = (data.name, data.metric)
try:
df.insert(len(df.columns), time_col, '-')
df.insert(len(df.columns), metric_col, '-')
except:
pass
df.at[algorithm, time_col] = elapsed
df.at[algorithm, metric_col] = metric
def configure_xgboost(data, use_gpu, args):
params = {'max_depth': max_depth,
'learning_rate': learning_rate, 'n_gpus': args.n_gpus, 'min_split_loss': min_split_loss,
'min_child_weight': min_weight, 'alpha': l1_reg, 'lambda': l2_reg, 'debug_verbose':args.debug_verbose}
if use_gpu:
params['tree_method'] = 'gpu_hist'
else:
params['tree_method'] = 'hist'
if data.task == "Regression":
params["objective"] = "reg:linear"
if use_gpu:
params["objective"] = "gpu:" + params["objective"]
elif data.task == "Multiclass classification":
params["objective"] = "multi:softmax"
params["num_class"] = np.max(data.y_test) + 1
elif data.task == "Classification":
params["objective"] = "binary:logistic"
if use_gpu:
params["objective"] = "gpu:" + params["objective"]
else:
raise ValueError("Unknown task: " + data.task)
return params
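# Hedged sketch (SimpleNamespace stand-ins for the Data/args objects used above):
# with use_gpu=True the helper selects the 'gpu_hist' tree method and prefixes the
# regression objective with 'gpu:'.
def _example_configure_xgboost_gpu():
    from types import SimpleNamespace
    fake_data = SimpleNamespace(task="Regression")
    fake_args = SimpleNamespace(n_gpus=1, debug_verbose=0)
    params = configure_xgboost(fake_data, use_gpu=True, args=fake_args)
    assert params["tree_method"] == "gpu_hist"
    assert params["objective"] == "gpu:reg:linear"
    return params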
def configure_lightgbm(data, use_gpu):
params = {
'task': 'train',
'boosting_type': 'gbdt',
'max_depth': max_depth,
'num_leaves': 2 ** 8,
'learning_rate': learning_rate, 'min_data_in_leaf': 0,
'min_sum_hessian_in_leaf': 1, 'lambda_l2': 1, 'min_split_gain': min_split_loss,
'min_child_weight': min_weight, 'lambda_l1': l1_reg, 'lambda_l2': l2_reg}
if use_gpu:
params["device"] = "gpu"
if data.task == "Regression":
params["objective"] = "regression"
elif data.task == "Multiclass classification":
params["objective"] = "multiclass"
params["num_class"] = np.max(data.y_test) + 1
elif data.task == "Classification":
params["objective"] = "binary"
else:
raise ValueError("Unknown task: " + data.task)
return params
def configure_catboost(data, use_gpu, args):
if int(args.n_gpus) == -1:
dev_arr = "-1"
else:
dev_arr = [i for i in range(0, int(args.n_gpus))]
params = {'learning_rate': learning_rate, 'depth': max_depth, 'l2_leaf_reg': l2_reg, 'devices' : dev_arr}
if use_gpu:
params['task_type'] = 'GPU'
if data.task == "Multiclass classification":
params['loss_function'] = 'MultiClass'
params["classes_count"] = np.max(data.y_test) + 1
params["eval_metric"] = 'MultiClass'
return params
def run_xgboost(data, params, args):
dtrain = xgb.DMatrix(data.X_train, data.y_train)
dval = xgb.DMatrix(data.X_val, data.y_val)
dtest = xgb.DMatrix(data.X_test, data.y_test)
start = time.time()
bst = xgb.train(params, dtrain, args.num_rounds, [(dtrain, "train"), (dval, "val")])
elapsed = time.time() - start
pred = bst.predict(dtest)
metric = eval(data, pred)
return elapsed, metric
def train_xgboost(alg, data, df, args):
if alg not in args.algs:
return
use_gpu = True if 'gpu' in alg else False
params = configure_xgboost(data, use_gpu, args)
elapsed, metric = run_xgboost(data, params, args)
add_data(df, alg, data, elapsed, metric)
def run_lightgbm(data, params, args):
import lightgbm as lgb
lgb_train = lgb.Dataset(data.X_train, data.y_train)
lgb_eval = lgb.Dataset(data.X_test, data.y_test, reference=lgb_train)
start = time.time()
gbm = lgb.train(params,
lgb_train,
num_boost_round=args.num_rounds,
valid_sets=lgb_eval)
elapsed = time.time() - start
pred = gbm.predict(data.X_test)
metric = eval(data, pred)
return elapsed, metric
def train_lightgbm(alg, data, df, args):
if alg not in args.algs:
return
use_gpu = True if 'gpu' in alg else False
params = configure_lightgbm(data, use_gpu)
elapsed, metric = run_lightgbm(data, params, args)
add_data(df, alg, data, elapsed, metric)
def run_catboost(data, params, args):
import catboost as cat
cat_train = cat.Pool(data.X_train, data.y_train)
cat_test = cat.Pool(data.X_test, data.y_test)
cat_val = cat.Pool(data.X_val, data.y_val)
params['iterations'] = args.num_rounds
if data.task is "Regression":
model = cat.CatBoostRegressor(**params)
else:
model = cat.CatBoostClassifier(**params)
start = time.time()
model.fit(cat_train, use_best_model=False, eval_set=cat_val)
elapsed = time.time() - start
if data.task == "Multiclass classification":
preds = model.predict_proba(cat_test)
else:
preds = model.predict(cat_test)
metric = eval(data, preds)
return elapsed, metric
def train_catboost(alg, data, df, args):
if alg not in args.algs:
return
use_gpu = True if 'gpu' in alg else False
# catboost GPU does not work with multiclass
if data.task == "Multiclass classification" and use_gpu:
add_data(df, alg, data, 'N/A', 'N/A')
return
params = configure_catboost(data, use_gpu, args)
elapsed, metric = run_catboost(data, params, args)
add_data(df, alg, data, elapsed, metric)
class Experiment:
def __init__(self, data_func, name, task, metric):
self.data_func = data_func
self.name = name
self.task = task
self.metric = metric
def run(self, df, args):
X, y = self.data_func(num_rows=args.rows)
data = Data(X, y, self.name, self.task, self.metric)
train_xgboost('xgb-cpu-hist', data, df, args)
train_xgboost('xgb-gpu-hist', data, df, args)
train_lightgbm('lightgbm-cpu', data, df, args)
train_lightgbm('lightgbm-gpu', data, df, args)
train_catboost('cat-cpu', data, df, args)
train_catboost('cat-gpu', data, df, args)
experiments = [
Experiment(data_loader.get_year, "YearPredictionMSD", "Regression", "RMSE"),
Experiment(data_loader.get_synthetic_regression, "Synthetic", "Regression", "RMSE"),
Experiment(data_loader.get_higgs, "Higgs", "Classification", "Accuracy"),
Experiment(data_loader.get_cover_type, "Cover Type", "Multiclass classification", "Accuracy"),
Experiment(data_loader.get_bosch, "Bosch", "Classification", "Accuracy"),
Experiment(data_loader.get_airline, "Airline", "Classification", "Accuracy"),
]
def write_results(df, filename, format):
if format == "latex":
tmp_df = df.copy()
tmp_df.columns = pd.MultiIndex.from_tuples(tmp_df.columns)
with open(filename, "w") as file:
file.write(tmp_df.to_latex())
elif format == "csv":
with open(filename, "w") as file:
file.write(df.to_csv())
else:
raise ValueError("Unknown format: " + format)
print(format + " results written to: " + filename)
def main():
all_dataset_names = ''
for exp in experiments:
all_dataset_names += exp.name + ','
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--rows', type=int, default=None,
help='Max rows to benchmark for each dataset.')
parser.add_argument('--num_rounds', type=int, default=500, help='Boosting rounds.')
parser.add_argument('--datasets', default=all_dataset_names, help='Datasets to run.')
parser.add_argument('--debug_verbose', type=int, default=1)
parser.add_argument('--n_gpus', type=int, default=-1)
parser.add_argument('--algs', default='xgb-cpu-hist,xgb-gpu-hist,lightgbm-cpu,lightgbm-gpu,'
'cat-cpu,cat-gpu', help='Boosting algorithms to run.')
args = parser.parse_args()
df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import os
import pandas as pd
import pandas.testing as tm
from fastparquet import ParquetFile
from fastparquet import write, parquet_thrift
from fastparquet import writer, encoding
from pandas.testing import assert_frame_equal
from pandas.api.types import CategoricalDtype
import pytest
from fastparquet.util import default_mkdirs
from .util import s3, tempdir, sql, TEST_DATA
from fastparquet import cencoding
def test_uvarint():
values = np.random.randint(0, 15000, size=100)
buf = np.zeros(30, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for v in values:
o.seek(0)
cencoding.encode_unsigned_varint(v, o)
o.seek(0)
out = cencoding.read_unsigned_var_int(o)
assert v == out
def test_bitpack():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
width = cencoding.width_from_max_int(values.max())
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
cencoding.encode_bitpacked(values, width, o)
o.seek(0)
head = cencoding.read_unsigned_var_int(o)
buf2 = np.zeros(300, dtype=np.int32)
out = cencoding.NumpyIO(buf2.view("uint8"))
cencoding.read_bitpacked(o, head, width, out)
assert (values == buf2[:len(values)]).all()
assert buf2[len(values):].sum() == 0 # zero padding
assert out.tell() // 8 - len(values) < 8
def test_length():
lengths = np.random.randint(0, 15000, size=100)
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for l in lengths:
o.seek(0)
o.write_int(l)
o.seek(0)
out = buf.view('int32')[0]
assert l == out
def test_rle_bp():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
buf = np.empty(len(values) + 5, dtype=np.int32)
out = cencoding.NumpyIO(buf.view('uint8'))
buf2 = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf2)
width = cencoding.width_from_max_int(values.max())
# without length
cencoding.encode_rle_bp(values, width, o)
l = o.tell()
o.seek(0)
cencoding.read_rle_bit_packed_hybrid(o, width, length=l, o=out)
assert (buf[:len(values)] == values).all()
def test_roundtrip_s3(s3):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data.loc[100, 'f'] = np.nan
data['cat'] = data.hello.astype('category')
noop = lambda x: True
myopen = s3.open
write(TEST_DATA+'/temp_parq', data, file_scheme='hive',
row_group_offsets=[0, 500], open_with=myopen, mkdirs=noop)
myopen = s3.open
pf = ParquetFile(TEST_DATA+'/temp_parq', open_with=myopen)
df = pf.to_pandas(categories=['cat', 'bcat'])
for col in data:
assert (df[col] == data[col])[~df[col].isnull()].all()
@pytest.mark.parametrize('scheme', ['simple', 'hive'])
@pytest.mark.parametrize('row_groups', [[0], [0, 500]])
@pytest.mark.parametrize('comp', ['SNAPPY', None, 'GZIP'])
def test_roundtrip(tempdir, scheme, row_groups, comp):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'u64': np.arange(1000, dtype=np.uint64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['a'] = np.array([b'a', b'b', b'c', b'd', b'e']*200, dtype="S1")
data['aa'] = data['a'].map(lambda x: 2*x).astype("S2")
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data['cat'] = data.hello.astype('category')
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme, row_group_offsets=row_groups,
compression=comp)
r = ParquetFile(fname)
assert r.fmd.num_rows == r.count() == 1000
df = r.to_pandas()
assert data.cat.dtype == 'category'
for col in r.columns:
assert (df[col] == data[col]).all()
# tests https://github.com/dask/fastparquet/issues/250
assert isinstance(data[col][0], type(df[col][0]))
def test_bad_coltype(tempdir):
df = pd.DataFrame({'0': [1, 2], (0, 1): [3, 4]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises((ValueError, TypeError)) as e:
write(fn, df)
assert "tuple" in str(e.value)
def test_bad_col(tempdir):
df = pd.DataFrame({'x': [1, 2]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises(ValueError) as e:
write(fn, df, has_nulls=['y'])
@pytest.mark.parametrize('scheme', ('simple', 'hive'))
def test_roundtrip_complex(tempdir, scheme,):
import datetime
data = pd.DataFrame({'ui32': np.arange(1000, dtype=np.uint32),
'i16': np.arange(1000, dtype=np.int16),
'ui8': np.array([1, 2, 3, 4]*250, dtype=np.uint8),
'f16': np.arange(1000, dtype=np.float16),
'dicts': [{'oi': 'you'}] * 1000,
't': [datetime.datetime.now()] * 1000,
'td': [datetime.timedelta(seconds=1)] * 1000,
'bool': np.random.choice([True, False], size=1000)
})
data.loc[100, 't'] = None
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
@pytest.mark.parametrize('df', [
pd.util.testing.makeMixedDataFrame(),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='Europe/London')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='Europe/Berlin')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='UTC')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz=datetime.timezone.min)}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz=datetime.timezone.max)})
])
def test_datetime_roundtrip(tempdir, df, capsys):
fname = os.path.join(tempdir, 'test.parquet')
w = False
if 'x' in df and 'Europe/' in str(df.x.dtype.tz):
with pytest.warns(UserWarning) as w:
write(fname, df)
else:
write(fname, df)
r = ParquetFile(fname)
if w:
assert any("UTC" in str(wm.message) for wm in w.list)
df2 = r.to_pandas()
pd.testing.assert_frame_equal(df, df2, check_categorical=False)
def test_nulls_roundtrip(tempdir):
fname = os.path.join(tempdir, 'temp.parq')
data = pd.DataFrame({'o': np.random.choice(['hello', 'world', None],
size=1000)})
data['cat'] = data['o'].astype('category')
writer.write(fname, data, has_nulls=['o', 'cat'])
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
assert (data[col].isnull() == df[col].isnull()).all()
def test_decimal_roundtrip(tempdir):
import decimal
def decimal_convert(x):
return decimal.Decimal(x)
fname = os.path.join(tempdir, 'decitemp.parq')
data = pd.DataFrame({'f64': np.arange(10000000, 10001000, dtype=np.float64) / 100000,
'f16': np.arange(1000, dtype=np.float16) /10000
})
data['f64']=data['f64'].apply(decimal_convert)
data['f16']=data['f16'].apply(decimal_convert)
writer.write(fname, data)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (data[col] == df[col]).all()
def test_make_definitions_with_nulls():
for _ in range(10):
out = np.empty(1000, dtype=np.int32)
o = cencoding.NumpyIO(out.view("uint8"))
data = pd.Series(np.random.choice([True, None],
size=np.random.randint(1, 1000)))
defs, d2 = writer.make_definitions(data, False)
buf = np.frombuffer(defs, dtype=np.uint8)
i = cencoding.NumpyIO(buf)
cencoding.read_rle_bit_packed_hybrid(i, 1, length=0, o=o)
assert (out[:len(data)] == ~data.isnull()).sum()
def test_make_definitions_without_nulls():
for _ in range(100):
out = np.empty(10000, dtype=np.int32)
o = cencoding.NumpyIO(out.view("uint8"))
data = pd.Series([True] * np.random.randint(1, 10000))
defs, d2 = writer.make_definitions(data, True)
l = len(data) << 1
p = 1
while l > 127:
l >>= 7
p += 1
assert len(defs) == 4 + p + 1 # "length", num_count, value
i = cencoding.NumpyIO(np.frombuffer(defs, dtype=np.uint8))
cencoding.read_rle_bit_packed_hybrid(i, 1, length=0, o=o)
assert (out[:o.tell() // 4] == ~data.isnull()).sum()
# class mock:
# def is_required(self, *args):
# return False
# def max_definition_level(self, *args):
# return 1
# def __getattr__(self, item):
# return None
# halper, metadata = mock(), mock()
def test_empty_row_group(tempdir):
fname = os.path.join(tempdir, 'temp.parq')
data = pd.DataFrame({'o': np.random.choice(['hello', 'world'],
size=1000)})
writer.write(fname, data, row_group_offsets=[0, 900, 1800])
pf = ParquetFile(fname)
assert len(pf.row_groups) == 2
def test_int_rowgroups(tempdir):
df = pd.DataFrame({'a': [1]*100})
fname = os.path.join(tempdir, 'test.parq')
writer.write(fname, df, row_group_offsets=30)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [25, 25, 25, 25]
writer.write(fname, df, row_group_offsets=33)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [25, 25, 25, 25]
writer.write(fname, df, row_group_offsets=34)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [34, 34, 32]
writer.write(fname, df, row_group_offsets=35)
r = ParquetFile(fname)
assert [rg.num_rows for rg in r.row_groups] == [34, 34, 32]
@pytest.mark.parametrize('scheme', ['hive', 'drill'])
def test_groups_roundtrip(tempdir, scheme):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', None], size=1000),
'b': np.random.randint(0, 64000, size=1000),
'c': np.random.choice([True, False], size=1000)})
writer.write(tempdir, df, partition_on=['a', 'c'], file_scheme=scheme)
r = ParquetFile(tempdir)
assert r.columns == ['b']
out = r.to_pandas()
if scheme == 'drill':
assert set(r.cats) == {'dir0', 'dir1'}
assert set(out.columns) == {'b', 'dir0', 'dir1'}
out.rename(columns={'dir0': 'a', 'dir1': 'c'}, inplace=True)
for i, row in out.iterrows():
assert row.b in list(df[(df.a == row.a) & (df.c == row.c)].b)
writer.write(tempdir, df, row_group_offsets=[0, 50], partition_on=['a', 'c'],
file_scheme=scheme)
r = ParquetFile(tempdir)
assert r.fmd.num_rows == r.count() == sum(~df.a.isnull())
assert len(r.row_groups) == 8
out = r.to_pandas()
if scheme == 'drill':
assert set(out.columns) == {'b', 'dir0', 'dir1'}
out.rename(columns={'dir0': 'a', 'dir1': 'c'}, inplace=True)
for i, row in out.iterrows():
assert row.b in list(df[(df.a==row.a)&(df.c==row.c)].b)
def test_groups_iterable(tempdir):
df = pd.DataFrame({'a': np.random.choice(['aaa', 'bbb', None], size=1000),
'b': np.random.randint(0, 64000, size=1000),
'c': np.random.choice([True, False], size=1000)})
writer.write(tempdir, df, partition_on=['a'], file_scheme='hive')
r = ParquetFile(tempdir)
assert r.columns == ['b', 'c']
out = r.to_pandas()
for i, row in out.iterrows():
assert row.b in list(df[(df.a==row.a)&(df.c==row.c)].b)
def test_empty_groupby(tempdir):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', None], size=1000),
'b': np.random.randint(0, 64000, size=1000),
'c': np.random.choice([True, False], size=1000)})
df.loc[499:, 'c'] = True # no False in second half
writer.write(tempdir, df, partition_on=['a', 'c'], file_scheme='hive',
row_group_offsets=[0, 500])
r = ParquetFile(tempdir)
assert r.count() == sum(~df.a.isnull())
assert len(r.row_groups) == 6
out = r.to_pandas()
for i, row in out.iterrows():
assert row.b in list(df[(df.a==row.a)&(df.c==row.c)].b)
def test_too_many_partition_columns(tempdir):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', 'c'], size=1000),
'c': np.random.choice([True, False], size=1000)})
with pytest.raises(ValueError) as ve:
writer.write(tempdir, df, partition_on=['a', 'c'], file_scheme='hive')
assert "Cannot include all columns" in str(ve.value)
def test_read_partitioned_and_write_with_empty_partions(tempdir):
df = pd.DataFrame({'a': np.random.choice(['a', 'b', 'c'], size=1000),
'c': np.random.choice([True, False], size=1000)})
writer.write(tempdir, df, partition_on=['a'], file_scheme='hive')
df_filtered = ParquetFile(tempdir).to_pandas(
filters=[('a', '==', 'b')]
)
writer.write(tempdir, df_filtered, partition_on=['a'], file_scheme='hive')
df_loaded = ParquetFile(tempdir).to_pandas()
tm.assert_frame_equal(df_filtered, df_loaded, check_categorical=False)
@pytest.mark.parametrize('compression', ['GZIP',
'gzip',
None,
{'x': 'GZIP'},
{'y': 'gzip', 'x': None}])
def test_write_compression_dict(tempdir, compression):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]})
fn = os.path.join(tempdir, 'tmp.parq')
writer.write(fn, df, compression=compression)
r = ParquetFile(fn)
df2 = r.to_pandas()
tm.assert_frame_equal(df, df2, check_categorical=False, check_dtype=False)
def test_write_compression_schema(tempdir):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]})
fn = os.path.join(tempdir, 'tmp.parq')
writer.write(fn, df, compression={'x': 'gzip'})
r = ParquetFile(fn)
assert all(c.meta_data.codec for row in r.row_groups
for c in row.columns
if c.meta_data.path_in_schema == ['x'])
assert not any(c.meta_data.codec for row in r.row_groups
for c in row.columns
if c.meta_data.path_in_schema == ['y'])
def test_index(tempdir):
import json
fn = os.path.join(tempdir, 'tmp.parq')
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1., 2., 3.]},
index=pd.Index([10, 20, 30], name='z'))
writer.write(fn, df)
pf = ParquetFile(fn)
assert set(pf.columns) == {'x', 'y', 'z'}
meta = json.loads(pf.key_value_metadata[b'pandas'])
assert meta['index_columns'] == ['z']
out = pf.to_pandas()
assert out.index.name == 'z'
| pd.testing.assert_frame_equal(df, out, check_dtype=False) | pandas.testing.assert_frame_equal |
"""Base Constraint class."""
import copy
import importlib
import inspect
import logging
import pandas as pd
from copulas.multivariate.gaussian import GaussianMultivariate
from rdt import HyperTransformer
from sdv.constraints.errors import MissingConstraintColumnError
LOGGER = logging.getLogger(__name__)
def _get_qualified_name(obj):
"""Return the Fully Qualified Name from an instance or class."""
module = obj.__module__
if hasattr(obj, '__name__'):
obj_name = obj.__name__
else:
obj_name = obj.__class__.__name__
return module + '.' + obj_name
def _module_contains_callable_name(obj):
"""Return if module contains the name of the callable object."""
if hasattr(obj, '__name__'):
obj_name = obj.__name__
else:
obj_name = obj.__class__.__name__
return obj_name in importlib.import_module(obj.__module__).__dict__
def get_subclasses(cls):
"""Recursively find subclasses for the current class object."""
subclasses = dict()
for subclass in cls.__subclasses__():
subclasses[subclass.__name__] = subclass
subclasses.update(get_subclasses(subclass))
return subclasses
def import_object(obj):
"""Import an object from its qualified name."""
if isinstance(obj, str):
package, name = obj.rsplit('.', 1)
return getattr(importlib.import_module(package), name)
return obj
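# Hedged example (not part of the original module): _get_qualified_name and
# import_object are inverses of each other for importable classes such as the
# GaussianMultivariate model imported above.
def _example_qualified_name_roundtrip():
    qualified = _get_qualified_name(GaussianMultivariate)
    assert import_object(qualified) is GaussianMultivariate
    assert import_object(GaussianMultivariate) is GaussianMultivariate
    return qualified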
class ConstraintMeta(type):
"""Metaclass for Constraints.
This metaclass replaces the ``__init__`` method with a new function
that stores the arguments passed to the __init__ method in a dict
as the attribute ``__kwargs__``.
This allows us to later on dump the class definition as a dict.
"""
def __init__(self, name, bases, attr):
super().__init__(name, bases, attr)
old__init__ = self.__init__
signature = inspect.signature(old__init__)
arg_names = list(signature.parameters.keys())[1:]
def __init__(self, *args, **kwargs):
class_name = self.__class__.__name__
if name == class_name:
self.__kwargs__ = copy.deepcopy(kwargs)
self.__kwargs__.update(dict(zip(arg_names, args)))
old__init__(self, *args, **kwargs)
__init__.__doc__ = old__init__.__doc__
__init__.__signature__ = signature
self.__init__ = __init__
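# Hedged example (toy subclass, not part of sdv): the metaclass stores the
# arguments given to __init__ in __kwargs__, regardless of whether they were
# passed positionally or by keyword. Constraint is defined right below, so this
# helper is only meant to be called after the module has finished importing.
def _example_constraint_meta_kwargs():
    class ToyConstraint(Constraint):
        def __init__(self, lower, upper=10, handling_strategy='all'):
            self.lower = lower
            self.upper = upper
            super().__init__(handling_strategy=handling_strategy)
    toy = ToyConstraint(3, upper=7)
    assert toy.__kwargs__ == {'lower': 3, 'upper': 7}
    return toy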
class Constraint(metaclass=ConstraintMeta):
"""Constraint base class.
This class is not intended to be used directly and should rather be
subclassed to create different types of constraints.
If ``handling_strategy`` is passed with the value ``transform``
or ``reject_sampling``, the ``filter_valid`` or ``transform`` and
``reverse_transform`` methods will be replaced respectively by a simple
identity function.
Args:
handling_strategy (str):
How this Constraint should be handled, which can be ``transform``,
``reject_sampling`` or ``all``.
fit_columns_model (bool):
If False, reject sampling will be used to handle conditional sampling.
Otherwise, a model will be trained and used to sample other columns
based on the conditioned column.
"""
constraint_columns = ()
_hyper_transformer = None
_columns_model = None
def _identity(self, table_data):
return table_data
def __init__(self, handling_strategy, fit_columns_model=True):
self.fit_columns_model = fit_columns_model
if handling_strategy == 'transform':
self.filter_valid = self._identity
elif handling_strategy == 'reject_sampling':
self.transform = self._identity
self.reverse_transform = self._identity
elif handling_strategy != 'all':
raise ValueError('Unknown handling strategy: {}'.format(handling_strategy))
def _fit(self, table_data):
del table_data
def fit(self, table_data):
"""Fit ``Constraint`` class to data.
If ``fit_columns_model`` is True, then this method will fit
a ``GaussianCopula`` model to the relevant columns in ``table_data``.
Subclasses can overwrite this method, or overwrite the ``_fit`` method
if they will not be needing the model to handle conditional sampling.
Args:
table_data (pandas.DataFrame):
Table data.
"""
self._fit(table_data)
if self.fit_columns_model and len(self.constraint_columns) > 1:
data_to_model = table_data[list(self.constraint_columns)]
self._hyper_transformer = HyperTransformer(dtype_transformers={
'O': 'one_hot_encoding',
})
transformed_data = self._hyper_transformer.fit_transform(data_to_model)
self._columns_model = GaussianMultivariate()
self._columns_model.fit(transformed_data)
def _transform(self, table_data):
return table_data
def _reject_sample(self, num_rows, conditions):
sampled = self._columns_model.sample(
num_rows=num_rows,
conditions=conditions
)
sampled = self._hyper_transformer.reverse_transform(sampled)
valid_rows = sampled[self.is_valid(sampled)]
counter = 0
total_sampled = num_rows
while len(valid_rows) < num_rows:
num_valid = len(valid_rows)
if counter >= 100:
if len(valid_rows) == 0:
error = 'Could not get enough valid rows within 100 trials.'
raise ValueError(error)
else:
multiplier = num_rows // num_valid
num_rows_missing = num_rows % num_valid
remainder_rows = valid_rows.iloc[0:num_rows_missing, :]
valid_rows = pd.concat([valid_rows] * multiplier + [remainder_rows],
ignore_index=True)
break
remaining = num_rows - num_valid
valid_probability = (num_valid + 1) / (total_sampled + 1)
max_rows = num_rows * 10
num_to_sample = min(int(remaining / valid_probability), max_rows)
total_sampled += num_to_sample
new_sampled = self._columns_model.sample(
num_rows=num_to_sample,
conditions=conditions
)
new_sampled = self._hyper_transformer.reverse_transform(new_sampled)
new_valid_rows = new_sampled[self.is_valid(new_sampled)]
valid_rows = pd.concat([valid_rows, new_valid_rows], ignore_index=True)
counter += 1
return valid_rows.iloc[0:num_rows, :]
def _sample_constraint_columns(self, table_data):
condition_columns = [c for c in self.constraint_columns if c in table_data.columns]
grouped_conditions = table_data[condition_columns].groupby(condition_columns)
all_sampled_rows = list()
for group, df in grouped_conditions:
if not isinstance(group, tuple):
group = [group]
transformed_condition = self._hyper_transformer.transform(df).iloc[0].to_dict()
sampled_rows = self._reject_sample(
num_rows=df.shape[0],
conditions=transformed_condition
)
all_sampled_rows.append(sampled_rows)
sampled_data = | pd.concat(all_sampled_rows, ignore_index=True) | pandas.concat |
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim a feature with low sum, e.g. ngrams lower than 5
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
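# Hedged example (toy counts, not part of the thesis pipeline): columns whose
# column-wise sum stays below par_n are dropped, so of the three toy n-gram
# columns only 'b' (sum 7) survives a threshold of 5.
def _example_trim_df_sum_feature():
    toy = pd.DataFrame({'a': [1, 1], 'b': [3, 4], 'c': [0, 0]})
    trimmed = trim_df_sum_feature(toy, 5)
    assert list(trimmed.columns) == ['b']
    return trimmed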
# Chapter 7.1.1. method to trim features with low occurrence over all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
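# Hedged example (toy counts): a column survives only if it is non-null in more
# than n articles; with n=1 the column 'x' (non-null twice) is kept while 'y'
# (non-null once) is removed.
def _example_trim_df_by_occurrence():
    toy = pd.DataFrame({'x': [1, 2, None], 'y': [None, None, 3]})
    kept = trim_df_by_occurrence(toy, 1)
    assert list(kept.columns) == ['x']
    return kept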
# Chapter 7.1.1. Process of filtering the data with low occurrence and saving the filtered features in a new file
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
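# Hedged example (toy counts): columns that appear in at least par_doc_freq of the
# documents are treated as corpus-wide noise; only 'rare' stays below the 0.5
# document-frequency threshold and is kept.
def _example_trim_df_by_doc_freq():
    toy = pd.DataFrame({'rare': [1, None, None, None], 'common': [1, 2, 3, 4]})
    kept = trim_df_by_doc_freq(toy, 0.5)
    assert list(kept.columns) == ['rare']
    return kept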
# Chapter 7.1.2 Process of filtering the data with high document frequency and saving the filtered features in a new file
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
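# Hedged example (toy counts): every row of absolute counts is divided by the
# matching entry of the length-metric vector, turning counts into relative
# frequencies.
def _example_get_rel_frequency():
    counts = pd.DataFrame({'the': [2, 4], 'cat': [1, 2]})
    word_counts = pd.Series([10, 20])
    rel = get_rel_frequency(counts, word_counts)
    assert rel['the'].tolist() == [0.2, 0.2]
    return rel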
# Chapter 7.1.4. whole process of the chapter: get the individual relative frequency of each feature, compare
# the correlation with the article length for the absolute and the relative variant, and save the feature with the
# estimated relative frequency in a new file
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
# only on the test data 60/40 split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
            # Calculate the mean correlation
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
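# Hedged sketch (random toy data): the helper scores every feature against the
# label and returns them ranked best-first; with this toy frame the perfectly
# class-aligned 'signal' column is expected to come out on top of the noise column.
def _example_sort_features_by_score():
    rng = np.random.RandomState(0)
    toy_x = pd.DataFrame({'signal': [0, 0, 1, 1] * 10,
                          'noise': rng.rand(40)})
    toy_y = pd.Series([0, 0, 1, 1] * 10)
    ranking = sort_features_by_score(toy_x, toy_y, mutual_info_classif)
    assert ranking.iloc[0]['feature_names'] == 'signal'
    return ranking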
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a smaller maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not 2% left for the tradeoff there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a smaller maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not 2% left for the tradeoff there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
# Some Values from 3 to square of samples
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
if int(math.sqrt(len(feature_list))) not in neighbors:
neighbors.append(int(math.sqrt(len(x_new_training.index))))
        # Not more neighbors than samples-2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Find the best percent if 5 following points were lower then the point before or had a deviation <= 0.5%
# or all points are 2% lower then the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a smaller maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not 2% left for the tradeoff there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
# select the 1 percent of the features (len/100) multiplied by par_best_percent
number_features = round(par_percent * (len(par_x.columns) / 100))
# If the 1st percent is less then 1
number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_authors != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as f:
sys.stdout = f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
        plt.xlabel('Data in %')
        plt.ylabel('Accuracy')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
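# Sketch of the filename filtering used above with hypothetical file names; only files whose name
# starts with "a<authors>_" and contains "t<texts>_" survive both regex filters. Not called anywhere.
def _example_filename_filter(par_num_authors=25, par_num_texts=100):
    files = ["a25_t100_word_2_gram_rel.csv", "a25_t50_char_3_gram_rel.csv", "a10_t100_cof_rel.csv"]
    r_authors = re.compile(f"a{par_num_authors}_")
    files = list(filter(r_authors.match, files))  # keeps the two files starting with "a25_"
    r_texts = re.compile(f".*t{par_num_texts}_")
    files = list(filter(r_texts.match, files))  # keeps only "a25_t100_word_2_gram_rel.csv"
    return files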
# Chapter 8.1. and later, basically the process of the iterative filter only with the svc classifier
def iterative_filter_process_svm(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
    if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for svc
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
sys.stdout = out_f
print(f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n")
sys.stdout = original_stdout
# draw diagram
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.axis([step_perc, (len(result_list_svc) + 1) * step_perc, 0, 1])
        plt.xlabel('Data in %')
        plt.ylabel('Accuracy')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, len(result_list_svc) + 1)], columns=['percent'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_accuracy = pd.concat([df_percent, df_svc], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 7.2.1. Get the accuracy of the features before the iterative filter, results in table 18
def get_accuracy_before_iterative_filter():
gnb_result_list, svc_result_list, knn_result_list, gnb_time_list, svc_time_list, knn_time_list \
= [], [], [], [], [], []
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
path = f'daten/5_iterative_filter/csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# Get the feature names for the table
feature_list = [re.search("(.+?(?=_rel))", f).group(1) for f in files]
for f in files:
print(f)
x = pd.read_csv(f"daten/5_iterative_filter/csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
gnb_result_list.append(score)
gnb_time_list.append(time_taken)
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC time for {f}: {time_taken}")
svc_result_list.append(score)
svc_time_list.append(time_taken)
        # Parameter grid for KNN
        # candidate neighbor counts from 3 up to the square root of the feature count (step 13)
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN time for {f}: {time_taken}")
knn_result_list.append(score)
knn_time_list.append(time_taken)
# create dataframe with the scores and times
df_results = pd.DataFrame()
df_results['feature'] = feature_list
df_results['score_gnb'] = gnb_result_list
df_results['time_gnb'] = gnb_time_list
df_results['score_svc'] = svc_result_list
df_results['time_svc'] = svc_time_list
df_results['score_knn'] = knn_result_list
df_results['time_knn'] = knn_time_list
return df_results
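# Sketch of the KNN neighbor grid built above for a hypothetical feature count of 400 columns:
# sqrt(400) = 20, so range(3, 20, 13) yields [3, 16]; the fixed values and sqrt itself are added.
def _example_knn_neighbor_grid(n_columns=400):
    neighbors = [i for i in range(3, int(math.sqrt(n_columns)), 13)]  # [3, 16]
    neighbors += [5, 11, 19, 36]
    if int(math.sqrt(n_columns)) not in neighbors:
        neighbors.append(int(math.sqrt(n_columns)))  # appends 20
    return neighbors  # -> [3, 16, 5, 11, 19, 36, 20]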
# Chapter 7.2.1. Get the accuracy of the features after the iterative filter, results in table 18
def get_accuracy_after_iterative_filter():
df_gnb_result = pd.DataFrame(columns=['feature', 'score_gnb', 'time_gnb'])
df_svc_result = pd.DataFrame(columns=['feature', 'score_svc', 'time_svc'])
df_knn_result = pd.DataFrame(columns=['feature', 'score_knn', 'time_knn'])
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
# path = f'daten/5_iterative_filter/csv_after_filter'
path = f'daten/5_iterative_filter/5_iterative_filter/csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
for f in files:
print(f)
# Get the feature name for the table
feature = re.search(".{4}(.+?(?=_rel))", f).group(1)
# x = pd.read_csv(f"daten/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x = pd.read_csv(f"daten/5_iterative_filter/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8",
nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# Select the classifier by the start of the filename
if f.split("_")[0] == "gnb":
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
df_gnb_result = df_gnb_result.append(pd.DataFrame(data={'feature': [feature], 'score_gnb': [score],
'time_gnb': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "svc":
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC training time for {f}: {time_taken}")
df_svc_result = df_svc_result.append(pd.DataFrame(data={'feature': [feature], 'score_svc': [score],
'time_svc': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "knn":
            # Parameter grid for KNN
            # candidate neighbor counts from 3 up to the square root of the feature count (step 13)
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
start_time = datetime.now()
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN test time for {f}: {time_taken}")
df_knn_result = df_knn_result.append(pd.DataFrame(data={'feature': [feature], 'score_knn': [score],
'time_knn': [time_taken]}), ignore_index=True)
df_merge = pd.merge(df_gnb_result, df_knn_result, on="feature", how='outer')
df_merge = pd.merge(df_merge, df_svc_result, on="feature", how='outer')
return df_merge
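# Sketch of the feature-name extraction used above with a hypothetical file name: the first four
# characters are the classifier prefix ("gnb_", "svc_" or "knn_"), the feature name ends at "_rel".
def _example_feature_name_from_filename(f="svc_word_2_gram_rel_filtered.csv"):
    return re.search(".{4}(.+?(?=_rel))", f).group(1)  # -> "word_2_gram"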
# Get n articles per author for a given number of authors. Required for setups with different numbers of authors and articles
def get_n_article_by_author(par_df, par_label_count, par_article_count):
df_articles = pd.DataFrame(columns=['label_encoded', 'text'])
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if list_article_count[labels.index(row['label_encoded'])] != 0:
d = {'label_encoded': [row['label_encoded']], 'text': [row['text']]}
df_articles = df_articles.append(pd.DataFrame.from_dict(d), ignore_index=True)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return df_articles
# Return indices of n articles per author for a given number of authors. Required for setups with different
# numbers of authors and articles
def get_n_article_index_by_author(par_df, par_label_count, par_article_count):
index_list = []
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if row['label_encoded'] in labels:
if list_article_count[labels.index(row['label_encoded'])] != 0:
index_list.append(index)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return index_list
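# Minimal usage sketch of the index selection above with a hypothetical, tiny DataFrame
# (labels 0 and 1 are kept, one article each); not called anywhere in the pipeline.
def _example_get_n_article_index_by_author():
    df = pd.DataFrame({'label_encoded': [0, 0, 1, 1, 2], 'text': ['t1', 't2', 't3', 't4', 't5']})
    return get_n_article_index_by_author(df, 1, 1)  # -> [0, 2]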
# Method to estimate the f1 score of the test data for GNB
def get_f1_for_gnb(par_x_train, par_x_test, par_y_train, par_y_test):
gnb = GaussianNB()
# GNB fit
gnb.fit(par_x_train, par_y_train)
# score on test data
gnb_score = f1_score(gnb.predict(par_x_test), par_y_test, average='micro')
return gnb_score
# Method to estimate the f1 score of the test data for SVC
def get_f1_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = f1_score(svc.predict(par_x_test), par_y_test, average='micro')
return svc_score
# Method to estimate the f1 score of the test data for KNN
def get_f1_for_knn(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
    # define the parameter grid for KNN; the number of neighbors must be lower than the number of samples
neighbors = [1, 3, 5, 11, 19, 36, 50]
# number of neighbors must be less than number of samples
neighbors = [x for x in neighbors if x < len(par_x_test)]
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(par_x_train, par_y_train)
# predict test data
knn_score = f1_score(knn.predict(par_x_test), par_y_test, average='micro')
return knn_score
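# Usage sketch for the three F1 helpers with synthetic data (purely illustrative; the real inputs
# are the stylometric feature matrices used elsewhere in this script). Not called anywhere.
def _example_f1_helpers():
    from sklearn.datasets import make_classification
    x, y = make_classification(n_samples=100, n_features=10, n_informative=5, n_classes=3,
                               random_state=42)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42,
                                                        stratify=y)
    return (get_f1_for_gnb(x_train, x_test, y_train, y_test),
            get_f1_for_svc(x_train, x_test, y_train, y_test, 5),
            get_f1_for_knn(x_train, x_test, y_train, y_test, 5))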
# Method to estimate the accuracy of the test data for SVC
def get_accuracy_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = accuracy_score(svc.predict(par_x_test), par_y_test)
return svc_score
# Chapter 7.3.1. comparison of the word length feature alternatives
def compare_word_length_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
    # Different numbers of texts per author
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'wl_matrix_gnb': [], 'wl_matrix_svc': [], 'wl_matrix_knn': [],
'wl_matrix_bins_20_30_gnb': [], 'wl_matrix_bins_20_30_svc': [], 'wl_matrix_bins_20_30_knn': [],
'wl_matrix_bins_10_20_gnb': [], 'wl_matrix_bins_10_20_svc': [], 'wl_matrix_bins_10_20_knn': [],
'wl_matrix_20_gnb': [], 'wl_matrix_20_svc': [], 'wl_matrix_20_knn': [],
'wl_avg_gnb': [], 'wl_avg_svc': [], 'wl_avg_knn': []}
for author_texts in list_author_texts:
        # get the articles of 25 authors with author_texts texts each
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
word_count = get_word_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["wl_matrix", "wl_matrix_bins_20_30", "wl_matrix_bins_10_20", "wl_avg", "wl_matrix_20"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "wl_matrix":
x = get_rel_frequency(get_word_length_matrix(df_article).fillna(value=0), word_count['word_count'])
elif feature == "wl_matrix_bins_20_30":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 20, 30).fillna(value=0),
word_count['word_count'])
elif feature == "wl_matrix_bins_10_20":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 10, 20).fillna(value=0),
word_count['word_count'])
elif feature == "wl_avg":
x = get_average_word_length(df_article)
elif feature == "wl_matrix_20":
x = get_word_length_matrix_with_margin(df_article, 20)
            # Scale the data; otherwise high counts in wl_matrix can dominate and the SVC hyperparameter
            # optimization takes a long time because of small differences from the average
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
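# Note on the cv choice above (worked example): with author_texts = 15, author_texts * 0.4 = 6 < 10,
# so cv = int(6) = 6 folds are used for the grid search; from 25 texts per author upwards
# (25 * 0.4 = 10) the default of 10 folds is kept.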
# Chapter 7.3.2. comparison of the digit feature alternatives
def compare_digit_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
    # Different numbers of texts per author
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'digit_sum_gnb': [], 'digit_sum_svc': [], 'digit_sum_knn': [],
'digits_gnb': [], 'digits_svc': [], 'digits_knn': []}
for author_texts in list_author_texts:
        # get the articles of 25 authors with author_texts texts each
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
        # Get the character count for the individual relative frequency
char_count = get_char_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["digit_sum", "digits"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "digit_sum":
x = get_rel_frequency(get_sum_digits(df_article).fillna(value=0), char_count['char_count'])
elif feature == "digits":
x = get_rel_frequency(get_digits(df_article).fillna(value=0), char_count['char_count'])
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 4-6
def compare_word_4_6_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
    # Different numbers of texts per author
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w4g_gnb': [], 'w4g_svc': [], 'w4g_knn': [],
'w5g_gnb': [], 'w5g_svc': [], 'w5g_knn': [],
'w6g_gnb': [], 'w6g_svc': [], 'w6g_knn': []}
# load the data
df_w4g = pd.read_csv("daten/6_feature_analysis/input_data/word_4_gram_rel.csv", sep=',', encoding="utf-8")
df_w5g = pd.read_csv("daten/6_feature_analysis/input_data/word_5_gram_rel.csv", sep=',', encoding="utf-8")
df_w6g = pd.read_csv("daten/6_feature_analysis/input_data/word_6_gram_rel.csv", sep=',', encoding="utf-8")
for author_texts in list_author_texts:
        # indices of the articles for 25 authors with author_texts texts each
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
                # fold counts between 5 and 10 are unusual here, so cap cv at 5
cv = 5 if cv > 5 else cv
else:
cv = 10
# Get the scores for every feature
for feature in ["w4g", "w5g", "w6g"]:
# select the indices from the article rows by the given indices
if feature == "w4g":
x = df_w4g.iloc[index_list]
elif feature == "w5g":
x = df_w5g.iloc[index_list]
elif feature == "w6g":
x = df_w6g.iloc[index_list]
# Delete features which only occur once
x = trim_df_by_occurrence(x, 1)
            # reset the indices to have an order from 0 to (authors * texts per author) - 1
x = x.reset_index(drop=True)
y = df_all_texts.iloc[index_list]['label_encoded']
y = y.reset_index(drop=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 2-3
def compare_word_2_3_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
    # Different numbers of texts per author
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w2g_gnb': [], 'w2g_svc': [], 'w2g_knn': [],
'w3g_gnb': [], 'w3g_svc': [], 'w3g_knn': []}
for author_texts in list_author_texts:
print(f"Texte pro Autor: {author_texts}")
        # indices of the articles for 25 authors with author_texts texts each
        index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
                # fold counts between 5 and 10 are unusual here, so cap cv at 5
cv = 5 if cv > 5 else cv
else:
cv = 10
# select the indices from the article rows by the given indices
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
print(f"Artikel: {len(df_balanced.index)}")
# extract the features
df_w2g = get_word_n_grams(df_balanced, 2)
df_w3g = get_word_n_grams(df_balanced, 3)
# Preprocessing steps
word_count = get_word_count(df_balanced)
df_w2g = preprocessing_steps_pos_tag_n_grams(df_w2g, word_count['word_count'])
df_w3g = preprocessing_steps_pos_tag_n_grams(df_w3g, word_count['word_count'])
        # Scale the features; otherwise the SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_w2g[df_w2g.columns] = scaler.fit_transform(df_w2g[df_w2g.columns])
df_w3g[df_w3g.columns] = scaler.fit_transform(df_w3g[df_w3g.columns])
label = df_balanced['label_encoded']
# Train/Test 60/40 split
df_w2g_train, df_w2g_test, df_w3g_train, df_w3g_test, label_train, label_test = \
train_test_split(df_w2g, df_w3g, label, test_size=0.4, random_state=42, stratify=label)
# Get the scores for every feature
for feature in ["w2g", "w3g"]:
            # apply the iterative filter
            # returns df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
if feature == "w2g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w2g_train, df_w2g_test, label_train, 1.0, mutual_info_classif)
elif feature == "w3g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w3g_train, df_w3g_test, label_train, 1.0, mutual_info_classif)
            # Do not use the iterative filter for the GNB data because it led to poor results;
            # use the unfiltered (scaled) n-grams of the current feature instead
            x_gnb_train, x_gnb_test, label_train, label_test = \
                train_test_split(df_w2g if feature == "w2g" else df_w3g, label,
                                 test_size=0.4, random_state=42, stratify=label)
print(f"cv: {cv}")
print(f"Train Labels: {label_train.value_counts()}")
print(f"Test Labels: {label_test.value_counts()}")
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.4. comparison of the different lengths of char ngrams
# Chapter 7.3.4. whole process of the comparison of the char-n-gram features
def compare_char_n_grams_process(par_base_path):
df_all_texts = | pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8") | pandas.read_csv |
from polo2 import PoloDb
import pandas as pd
import numpy as np
import sqlite3
class Corpus(object):
def __init__(self, config):
        self.config = config
        corpus_db_file = self.config.generate_corpus_db_file_path()
self.corpus = PoloDb(corpus_db_file)
class Elements(object):
def __init__(self, config, trial_name='trial1'):
# Set some values
if trial_name not in config.trials:
raise ValueError("Invalid trail name `{}`.format(trial)")
self.config = config
self.trial = trial_name
self.slug = self.config.ini['DEFAULT']['slug']
self.base_path = self.config.ini['DEFAULT']['base_path']
self.thresh = float(self.config.ini['DEFAULT']['thresh'])
# Load the databases
corpus_db_file = self.config.generate_corpus_db_file_path()
model_db_file = self.config.generate_model_db_file_path(self.trial)
self.corpus = PoloDb(corpus_db_file)
self.model = PoloDb(model_db_file)
def get_table(self, table_name, db_conn):
df = self.get_sql("SELECT * FROM {}".format(table_name), db_conn)
return df
def get_sql(self, query, db_conn, params=()):
try:
df = pd.read_sql_query(query, db_conn, params=params)
return df
        except Exception:
return None
def get_doc_count(self):
self.doc_count = pd.read_sql_query('SELECT count(*) AS n FROM doc', self.corpus.conn).n.tolist()[0]
return self.doc_count
def get_topic_count(self):
self.topic_count = pd.read_sql_query('SELECT count(*) AS n FROM topic', self.model.conn).n.tolist()[0]
return self.topic_count
def get_topic(self, topic_id):
topic_id = int(topic_id)
sql = 'SELECT * FROM topic WHERE topic_id = ?'
df = | pd.read_sql_query(sql, self.model.conn, params=(topic_id,)) | pandas.read_sql_query |
"""
Tax-Calculator tax-filing-unit Records class.
"""
# CODING-STYLE CHECKS:
# pycodestyle records.py
# pylint --disable=locally-disabled records.py
import os
import json
import six
import numpy as np
import pandas as pd
from taxcalc.growfactors import GrowFactors
from taxcalc.utils import read_egg_csv, read_egg_json
class Records(object):
"""
Constructor for the tax-filing-unit Records class.
Parameters
----------
data: string or Pandas DataFrame
string describes CSV file in which records data reside;
DataFrame already contains records data;
default value is the string 'puf.csv'
For details on how to use your own data with the Tax-Calculator,
look at the test_Calculator_using_nonstd_input() function in the
tests/test_calculate.py file.
exact_calculations: boolean
specifies whether or not exact tax calculations are done without
any smoothing of "stair-step" provisions in income tax law;
default value is false.
gfactors: GrowFactors class instance or None
containing record data extrapolation (or "blowup") factors.
NOTE: the constructor should never call the _blowup() method.
weights: string or Pandas DataFrame or None
string describes CSV file in which weights reside;
DataFrame already contains weights;
None creates empty sample-weights DataFrame;
default value is filename of the PUF weights.
adjust_ratios: string or Pandas DataFrame or None
string describes CSV file in which adjustment ratios reside;
DataFrame already contains adjustment ratios;
None creates empty adjustment-ratios DataFrame;
default value is filename of the PUF adjustment ratios.
start_year: integer
specifies calendar year of the input data;
default value is PUFCSV_YEAR.
Note that if specifying your own data (see above) as being a custom
data set, be sure to explicitly set start_year to the
custom data's calendar year. For details on how to
use your own data with the Tax-Calculator, read the
DATAPREP.md file in the top-level directory and then
look at the test_Calculator_using_nonstd_input()
function in the taxcalc/tests/test_calculate.py file.
Raises
------
ValueError:
if data is not the appropriate type.
if taxpayer and spouse variables do not add up to filing-unit total.
if dividends is less than qualified dividends.
if gfactors is not None or a GrowFactors class instance.
if start_year is not an integer.
if files cannot be found.
Returns
-------
class instance: Records
Notes
-----
Typical usage when using PUF input data is as follows::
recs = Records()
which uses all the default parameters of the constructor, and
therefore, imputed variables are generated to augment the data and
initial-year grow factors are applied to the data. There are
situations in which you need to specify the values of the Record
constructor's arguments, but be sure you know exactly what you are
doing when attempting this.
Use Records.cps_constructor() to get a Records object instantiated
with CPS input data.
"""
# suppress pylint warnings about unrecognized Records variables:
# pylint: disable=no-member
# suppress pylint warnings about uppercase variable names:
# pylint: disable=invalid-name
# suppress pylint warnings about too many class instance attributes:
# pylint: disable=too-many-instance-attributes
PUFCSV_YEAR = 2011
CPSCSV_YEAR = 2014
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
PUF_WEIGHTS_FILENAME = 'puf_weights.csv.gz'
PUF_RATIOS_FILENAME = 'puf_ratios.csv'
CPS_WEIGHTS_FILENAME = 'cps_weights.csv.gz'
CPS_RATIOS_FILENAME = None
VAR_INFO_FILENAME = 'records_variables.json'
CPS_BENEFITS_FILENAME = 'cps_benefits.csv.gz'
def __init__(self,
data='puf.csv',
exact_calculations=False,
gfactors=GrowFactors(),
weights=PUF_WEIGHTS_FILENAME,
adjust_ratios=PUF_RATIOS_FILENAME,
benefits=None,
start_year=PUFCSV_YEAR):
# pylint: disable=too-many-arguments,too-many-locals
self.__data_year = start_year
# read specified data
self._read_data(data, exact_calculations, (benefits is None))
# check that three sets of split-earnings variables have valid values
msg = 'expression "{0} == {0}p + {0}s" is not true for every record'
tol = 0.020001 # handles "%.2f" rounding errors
if not np.allclose(self.e00200, (self.e00200p + self.e00200s),
rtol=0.0, atol=tol):
raise ValueError(msg.format('e00200'))
if not np.allclose(self.e00900, (self.e00900p + self.e00900s),
rtol=0.0, atol=tol):
raise ValueError(msg.format('e00900'))
if not np.allclose(self.e02100, (self.e02100p + self.e02100s),
rtol=0.0, atol=tol):
raise ValueError(msg.format('e02100'))
# check that ordinary dividends are no less than qualified dividends
other_dividends = np.maximum(0., self.e00600 - self.e00650)
if not np.allclose(self.e00600, self.e00650 + other_dividends,
rtol=0.0, atol=tol):
msg = 'expression "e00600 >= e00650" is not true for every record'
raise ValueError(msg)
del other_dividends
# check that total pension income is no less than taxable pension inc
nontaxable_pensions = np.maximum(0., self.e01500 - self.e01700)
if not np.allclose(self.e01500, self.e01700 + nontaxable_pensions,
rtol=0.0, atol=tol):
msg = 'expression "e01500 >= e01700" is not true for every record'
raise ValueError(msg)
del nontaxable_pensions
# handle grow factors
is_correct_type = isinstance(gfactors, GrowFactors)
if gfactors is not None and not is_correct_type:
msg = 'gfactors is neither None nor a GrowFactors instance'
raise ValueError(msg)
self.gfactors = gfactors
# read sample weights
self.WT = None
self._read_weights(weights)
self.ADJ = None
self._read_ratios(adjust_ratios)
# read extrapolated benefit variables
self.BEN = None
self._read_benefits(benefits)
# weights must be same size as tax record data
if self.WT.size > 0 and self.array_length != len(self.WT.index):
# scale-up sub-sample weights by year-specific factor
sum_full_weights = self.WT.sum()
self.WT = self.WT.iloc[self.__index]
sum_sub_weights = self.WT.sum()
factor = sum_full_weights / sum_sub_weights
self.WT *= factor
# specify current_year and FLPDYR values
if isinstance(start_year, int):
self.__current_year = start_year
self.FLPDYR.fill(start_year)
else:
msg = 'start_year is not an integer'
raise ValueError(msg)
# construct sample weights for current_year
if self.WT.size > 0:
wt_colname = 'WT{}'.format(self.current_year)
if wt_colname in self.WT.columns:
self.s006 = self.WT[wt_colname] * 0.01
# specify that variable values do not include behavioral responses
self.behavioral_responses_are_included = False
@staticmethod
def cps_constructor(data=None,
no_benefits=False,
exact_calculations=False,
gfactors=GrowFactors()):
"""
Static method returns a Records object instantiated with CPS
input data. This works in a analogous way to Records(), which
returns a Records object instantiated with PUF input data.
This is a convenience method that eliminates the need to
specify all the details of the CPS input data just as the
default values of the arguments of the Records class constructor
eliminate the need to specify all the details of the PUF input
data.
"""
if data is None:
data = os.path.join(Records.CUR_PATH, 'cps.csv.gz')
if no_benefits:
benefits_filename = None
else:
benefits_filename = Records.CPS_BENEFITS_FILENAME
return Records(data=data,
exact_calculations=exact_calculations,
gfactors=gfactors,
weights=Records.CPS_WEIGHTS_FILENAME,
adjust_ratios=Records.CPS_RATIOS_FILENAME,
benefits=benefits_filename,
start_year=Records.CPSCSV_YEAR)
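    # Illustrative usage sketch (not part of the documented API; assumes the bundled
    # cps.csv.gz and CPS weights/benefits files are available):
    #   recs = Records.cps_constructor()
    #   recs.increment_year()   # extrapolate, reweight and adjust to the next year
    #   assert recs.current_year == Records.CPSCSV_YEAR + 1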
@property
def data_year(self):
"""
Records class original data year property.
"""
return self.__data_year
@property
def current_year(self):
"""
Records class current calendar year property.
"""
return self.__current_year
@property
def array_length(self):
"""
Length of arrays in Records class's DataFrame.
"""
return self.__dim
def increment_year(self):
"""
Add one to current year.
Also, does extrapolation, reweighting, adjusting for new current year.
"""
# no incrementing Records object that includes behavioral responses
assert self.behavioral_responses_are_included is False
# move to next year
self.__current_year += 1
# apply variable extrapolation grow factors
if self.gfactors is not None:
self._blowup(self.__current_year)
# apply variable adjustment ratios
self._adjust(self.__current_year)
# specify current-year sample weights
if self.WT.size > 0:
wt_colname = 'WT{}'.format(self.__current_year)
self.s006 = self.WT[wt_colname] * 0.01
# extrapolate benefit values
if self.BEN.size > 0:
self._extrapolate_benefits(self.current_year)
def set_current_year(self, new_current_year):
"""
Set current year to specified value and updates FLPDYR variable.
Unlike increment_year method, extrapolation, reweighting, adjusting
are skipped.
"""
self.__current_year = new_current_year
self.FLPDYR.fill(new_current_year)
@staticmethod
def read_var_info():
"""
Read Records variables metadata from JSON file;
returns dictionary and specifies static varname sets listed below.
"""
var_info_path = os.path.join(Records.CUR_PATH,
Records.VAR_INFO_FILENAME)
if os.path.exists(var_info_path):
with open(var_info_path) as vfile:
vardict = json.load(vfile)
else:
# cannot call read_egg_ function in unit tests
vardict = read_egg_json(
Records.VAR_INFO_FILENAME) # pragma: no cover
Records.INTEGER_READ_VARS = set(k for k, v in vardict['read'].items()
if v['type'] == 'int')
FLOAT_READ_VARS = set(k for k, v in vardict['read'].items()
if v['type'] == 'float')
Records.MUST_READ_VARS = set(k for k, v in vardict['read'].items()
if v.get('required'))
Records.USABLE_READ_VARS = Records.INTEGER_READ_VARS | FLOAT_READ_VARS
INT_CALCULATED_VARS = set(k for k, v in vardict['calc'].items()
if v['type'] == 'int')
FLOAT_CALCULATED_VARS = set(k for k, v in vardict['calc'].items()
if v['type'] == 'float')
FIXED_CALCULATED_VARS = set(k for k, v in vardict['calc'].items()
if v['type'] == 'unchanging_float')
Records.CALCULATED_VARS = (INT_CALCULATED_VARS |
FLOAT_CALCULATED_VARS |
FIXED_CALCULATED_VARS)
Records.CHANGING_CALCULATED_VARS = FLOAT_CALCULATED_VARS
Records.INTEGER_VARS = Records.INTEGER_READ_VARS | INT_CALCULATED_VARS
return vardict
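    # Sketch of the metadata layout this method assumes (abridged, with partly hypothetical
    # variable entries; the real schema lives in records_variables.json):
    #   {"read": {"MARS": {"type": "int", "required": true},
    #             "e00200": {"type": "float"}, ...},
    #    "calc": {"some_calc_var": {"type": "float"}, ...}}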
# specify various sets of variable names
INTEGER_READ_VARS = None
MUST_READ_VARS = None
USABLE_READ_VARS = None
CALCULATED_VARS = None
CHANGING_CALCULATED_VARS = None
INTEGER_VARS = None
# ----- begin private methods of Records class -----
def _blowup(self, year):
"""
Apply to variables the grow factors for specified calendar year.
"""
# pylint: disable=too-many-locals,too-many-statements
AWAGE = self.gfactors.factor_value('AWAGE', year)
AINTS = self.gfactors.factor_value('AINTS', year)
ADIVS = self.gfactors.factor_value('ADIVS', year)
ATXPY = self.gfactors.factor_value('ATXPY', year)
ASCHCI = self.gfactors.factor_value('ASCHCI', year)
ASCHCL = self.gfactors.factor_value('ASCHCL', year)
ACGNS = self.gfactors.factor_value('ACGNS', year)
ASCHEI = self.gfactors.factor_value('ASCHEI', year)
ASCHEL = self.gfactors.factor_value('ASCHEL', year)
ASCHF = self.gfactors.factor_value('ASCHF', year)
AUCOMP = self.gfactors.factor_value('AUCOMP', year)
ASOCSEC = self.gfactors.factor_value('ASOCSEC', year)
ACPIM = self.gfactors.factor_value('ACPIM', year)
ABOOK = self.gfactors.factor_value('ABOOK', year)
AIPD = self.gfactors.factor_value('AIPD', year)
self.e00200 *= AWAGE
self.e00200p *= AWAGE
self.e00200s *= AWAGE
self.e00300 *= AINTS
self.e00400 *= AINTS
self.e00600 *= ADIVS
self.e00650 *= ADIVS
self.e00700 *= ATXPY
self.e00800 *= ATXPY
self.e00900s[:] = np.where(self.e00900s >= 0,
self.e00900s * ASCHCI,
self.e00900s * ASCHCL)
self.e00900p[:] = np.where(self.e00900p >= 0,
self.e00900p * ASCHCI,
self.e00900p * ASCHCL)
self.e00900[:] = self.e00900p + self.e00900s
self.e01100 *= ACGNS
self.e01200 *= ACGNS
self.e01400 *= ATXPY
self.e01500 *= ATXPY
self.e01700 *= ATXPY
self.e02000[:] = np.where(self.e02000 >= 0,
self.e02000 * ASCHEI,
self.e02000 * ASCHEL)
self.e02100 *= ASCHF
self.e02100p *= ASCHF
self.e02100s *= ASCHF
self.e02300 *= AUCOMP
self.e02400 *= ASOCSEC
self.e03150 *= ATXPY
self.e03210 *= ATXPY
self.e03220 *= ATXPY
self.e03230 *= ATXPY
self.e03270 *= ACPIM
self.e03240 *= ATXPY
self.e03290 *= ACPIM
self.e03300 *= ATXPY
self.e03400 *= ATXPY
self.e03500 *= ATXPY
self.e07240 *= ATXPY
self.e07260 *= ATXPY
self.e07300 *= ABOOK
self.e07400 *= ABOOK
self.p08000 *= ATXPY
self.e09700 *= ATXPY
self.e09800 *= ATXPY
self.e09900 *= ATXPY
self.e11200 *= ATXPY
# ITEMIZED DEDUCTIONS
self.e17500 *= ACPIM
self.e18400 *= ATXPY
self.e18500 *= ATXPY
self.e19200 *= AIPD
self.e19800 *= ATXPY
self.e20100 *= ATXPY
self.e20400 *= ATXPY
self.g20500 *= ATXPY
# CAPITAL GAINS
self.p22250 *= ACGNS
self.p23250 *= ACGNS
self.e24515 *= ACGNS
self.e24518 *= ACGNS
# SCHEDULE E
self.e26270 *= ASCHEI
self.e27200 *= ASCHEI
self.k1bx14p *= ASCHEI
self.k1bx14s *= ASCHEI
# MISCELLANOUS SCHEDULES
self.e07600 *= ATXPY
self.e32800 *= ATXPY
self.e58990 *= ATXPY
self.e62900 *= ATXPY
self.e87530 *= ATXPY
self.e87521 *= ATXPY
self.cmbtp *= ATXPY
def _adjust(self, year):
"""
Adjust value of income variables to match SOI distributions
Note: adjustment must leave variables as numpy.ndarray type
"""
if self.ADJ.size > 0:
# Interest income
self.e00300 *= self.ADJ['INT{}'.format(year)][self.agi_bin].values
def _extrapolate_benefits(self, year):
"""
Extrapolate benefit variables
"""
setattr(self, 'housing_ben', self.BEN['housing_{}'.format(year)])
setattr(self, 'ssi_ben', self.BEN['ssi_{}'.format(year)])
setattr(self, 'snap_ben', self.BEN['snap_{}'.format(year)])
setattr(self, 'tanf_ben', self.BEN['tanf_{}'.format(year)])
setattr(self, 'vet_ben', self.BEN['vet_{}'.format(year)])
setattr(self, 'wic_ben', self.BEN['wic_{}'.format(year)])
setattr(self, 'mcare_ben', self.BEN['mcare_{}'.format(year)])
setattr(self, 'mcaid_ben', self.BEN['mcaid_{}'.format(year)])
self.other_ben *= self.gfactors.factor_value('ABENEFITS', year)
def _read_data(self, data, exact_calcs, no_benefits):
"""
Read Records data from file or use specified DataFrame as data.
Specifies exact array depending on boolean value of exact_calcs.
Set benefits to zero if no_benefits is True; otherwise do nothing.
"""
# pylint: disable=too-many-statements,too-many-branches
if Records.INTEGER_VARS is None:
Records.read_var_info()
# read specified data
if isinstance(data, pd.DataFrame):
taxdf = data
elif isinstance(data, six.string_types):
if os.path.isfile(data):
taxdf = pd.read_csv(data)
else:
# cannot call read_egg_ function in unit tests
taxdf = read_egg_csv(data) # pragma: no cover
else:
msg = 'data is neither a string nor a Pandas DataFrame'
raise ValueError(msg)
self.__dim = len(taxdf.index)
self.__index = taxdf.index
# create class variables using taxdf column names
READ_VARS = set()
self.IGNORED_VARS = set()
for varname in list(taxdf.columns.values):
if varname in Records.USABLE_READ_VARS:
READ_VARS.add(varname)
if varname in Records.INTEGER_READ_VARS:
setattr(self, varname,
taxdf[varname].astype(np.int32).values)
else:
setattr(self, varname,
taxdf[varname].astype(np.float64).values)
else:
self.IGNORED_VARS.add(varname)
# check that MUST_READ_VARS are all present in taxdf
if not Records.MUST_READ_VARS.issubset(READ_VARS):
msg = 'Records data missing one or more MUST_READ_VARS'
raise ValueError(msg)
# delete intermediate taxdf object
del taxdf
# create other class variables that are set to all zeros
UNREAD_VARS = Records.USABLE_READ_VARS - READ_VARS
ZEROED_VARS = Records.CALCULATED_VARS | UNREAD_VARS
for varname in ZEROED_VARS:
if varname in Records.INTEGER_VARS:
setattr(self, varname,
np.zeros(self.array_length, dtype=np.int32))
else:
setattr(self, varname,
np.zeros(self.array_length, dtype=np.float64))
# check for valid MARS values
if not np.all(np.logical_and(np.greater_equal(self.MARS, 1),
np.less_equal(self.MARS, 5))):
raise ValueError('not all MARS values in [1,5] range')
# create variables derived from MARS, which is in MUST_READ_VARS
self.num[:] = np.where(self.MARS == 2, 2, 1)
self.sep[:] = np.where(self.MARS == 3, 2, 1)
# check for valid EIC values
if not np.all(np.logical_and(np.greater_equal(self.EIC, 0),
np.less_equal(self.EIC, 3))):
raise ValueError('not all EIC values in [0,3] range')
# specify value of exact array
self.exact[:] = np.where(exact_calcs is True, 1, 0)
# optionally set benefits to zero
if no_benefits:
self.housing_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.ssi_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.snap_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.tanf_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.vet_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.wic_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.mcare_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.mcaid_ben[:] = np.zeros(self.array_length, dtype=np.float64)
self.other_ben[:] = np.zeros(self.array_length, dtype=np.float64)
# delete intermediate variables
del READ_VARS
del UNREAD_VARS
del ZEROED_VARS
def zero_out_changing_calculated_vars(self):
"""
Set to zero all variables in the Records.CHANGING_CALCULATED_VARS set.
"""
for varname in Records.CHANGING_CALCULATED_VARS:
var = getattr(self, varname)
var.fill(0.)
del var
def _read_weights(self, weights):
"""
Read Records weights from file or
use specified DataFrame as data or
create empty DataFrame if None.
Assumes weights are integers equal to 100 times the real weight.
"""
if weights is None:
setattr(self, 'WT', pd.DataFrame({'nothing': []}))
return
if isinstance(weights, pd.DataFrame):
WT = weights
elif isinstance(weights, six.string_types):
weights_path = os.path.join(Records.CUR_PATH, weights)
if os.path.isfile(weights_path):
WT = pd.read_csv(weights_path)
else:
# cannot call read_egg_ function in unit tests
WT = read_egg_csv(
os.path.basename(weights_path)) # pragma: no cover
else:
msg = 'weights is not None or a string or a Pandas DataFrame'
raise ValueError(msg)
assert isinstance(WT, pd.DataFrame)
setattr(self, 'WT', WT.astype(np.int32))
del WT
def _read_ratios(self, ratios):
"""
Read Records adjustment ratios from file or
create empty DataFrame if None
"""
if ratios is None:
setattr(self, 'ADJ', pd.DataFrame({'nothing': []}))
return
if isinstance(ratios, six.string_types):
ratios_path = os.path.join(Records.CUR_PATH, ratios)
if os.path.isfile(ratios_path):
ADJ = pd.read_csv(ratios_path,
index_col=0)
else:
# cannot call read_egg_ function in unit tests
ADJ = read_egg_csv(os.path.basename(ratios_path),
index_col=0) # pragma: no cover
else:
msg = 'ratios is neither None nor a string'
raise ValueError(msg)
assert isinstance(ADJ, pd.DataFrame)
ADJ = ADJ.transpose()
if ADJ.index.name != 'agi_bin':
ADJ.index.name = 'agi_bin'
self.ADJ = pd.DataFrame()
setattr(self, 'ADJ', ADJ.astype(np.float32))
del ADJ
def _read_benefits(self, benefits):
"""
Read Records extrapolated benefits from a file or uses a specified
DataFrame or creates an empty DataFrame if None. Should only be
used with the cps.csv file
"""
if benefits is None:
setattr(self, 'BEN', pd.DataFrame({'Nothing': []}))
return
if isinstance(benefits, pd.DataFrame):
BEN_partial = benefits
elif isinstance(benefits, six.string_types):
benefits_path = os.path.join(Records.CUR_PATH, benefits)
if os.path.isfile(benefits_path):
BEN_partial = | pd.read_csv(benefits_path) | pandas.read_csv |
import re
import numpy as np
import pandas as pd
import pytest
from woodwork import DataTable
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
ZIPCode
)
def test_datatable_physical_types(sample_df):
dt = DataTable(sample_df)
assert isinstance(dt.physical_types, dict)
assert set(dt.physical_types.keys()) == set(sample_df.columns)
for k, v in dt.physical_types.items():
assert isinstance(k, str)
assert v == sample_df[k].dtype
def test_sets_category_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
pd.Series(['a', pd.NaT, 'c'], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_category_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for logical_type in logical_types:
ltypes = {
column_name: NaturalLanguage,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_object_dtype_on_update(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: NaturalLanguage
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: LatLong})
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_string_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
]
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_string_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for logical_type in logical_types:
ltypes = {
column_name: Categorical,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name),
]
logical_type = Boolean
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_update():
column_name = 'test_series'
series = pd.Series([0, 1, 0], name=column_name)
series = series.astype('object')
ltypes = {
column_name: Integer,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: Boolean})
assert dt.columns[column_name].logical_type == Boolean
assert dt.columns[column_name].dtype == Boolean.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == Boolean.pandas_dtype
def test_sets_int64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([1, 2, 3], name=column_name),
pd.Series([1, None, 3], name=column_name),
pd.Series([1, np.nan, 3], name=column_name),
pd.Series([1, pd.NA, 3], name=column_name),
]
logical_types = [Integer]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_int64_dtype_on_update():
column_name = 'test_series'
series = pd.Series([1.0, 2.0, 1.0], name=column_name)
series = series.astype('object')
logical_types = [Integer]
for logical_type in logical_types:
ltypes = {
column_name: Double,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_float64_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([1.1, 2, 3], name=column_name),
pd.Series([1.1, None, 3], name=column_name),
pd.Series([1.1, np.nan, 3], name=column_name),
]
logical_type = Double
for series in series_list:
series = series.astype('object')
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_float64_dtype_on_update():
column_name = 'test_series'
series = pd.Series([0, 1, 0], name=column_name)
series = series.astype('object')
ltypes = {
column_name: Integer,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: Double})
assert dt.columns[column_name].logical_type == Double
assert dt.columns[column_name].dtype == Double.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == Double.pandas_dtype
def test_sets_datetime64_dtype_on_init():
column_name = 'test_series'
series_list = [
| pd.Series(['2020-01-01', '2020-01-02', '2020-01-03'], name=column_name) | pandas.Series |
import datetime as dt
from numpy import nan
from numpy.testing import assert_equal
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from pymove import MoveDataFrame, datetime
from pymove.utils.constants import (
COUNT,
LOCAL_LABEL,
MAX,
MEAN,
MIN,
PREV_LOCAL,
STD,
SUM,
THRESHOLD,
TIME_TO_PREV,
)
default_date = dt.datetime.strptime('2018-03-12', '%Y-%m-%d')
default_date_time = dt.datetime.strptime('2018-03-12 12:08:07', '%Y-%m-%d %H:%M:%S')
str_date_default = '2018-03-12'
str_date_time_default = '2018-03-12 12:08:07'
list_data = [
[39.984094, 116.319236, '2008-10-23 05:44:05', 1],
[39.984198, 116.319322, '2008-10-23 05:56:06', 1],
[39.984224, 116.319402, '2008-10-23 05:56:11', 1],
[39.984224, 116.319402, '2008-10-23 06:10:15', 1],
]
def _default_move_df():
return MoveDataFrame(
data=list_data,
)
def test_date_to_str():
expected = '2008-10-23'
time_str = datetime.date_to_str(Timestamp('2008-10-23 05:53:05'))
assert(time_str == expected)
def test_str_to_datetime():
expected_date = default_date
expected_date_time = default_date_time
converted_date = datetime.str_to_datetime('2018-03-12')
assert(converted_date == expected_date)
converted_date_time = datetime.str_to_datetime('2018-03-12 12:08:07')
assert(converted_date_time == expected_date_time)
def test_to_str():
expected = str_date_time_default
data = default_date_time
str_date_time = datetime.to_str(data)
assert(str_date_time == expected)
def test_to_min():
expected = 25347608
data = default_date_time
date_to_min = datetime.to_min(data)
assert(date_to_min == expected)
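    # Worked check of the expected value: 2018-03-12 12:08 is 17602 days plus
    # 728 minutes after the Unix epoch, and 17602 * 1440 + 728 = 25347608.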
def test_min_to_datetime():
expected = dt.datetime.strptime('2018-03-12 12:08:00',
'%Y-%m-%d %H:%M:%S')
data = 25347608
min_to_date = datetime.min_to_datetime(data)
assert(min_to_date == expected)
def test_to_day_of_week_int():
expected = 0
data = default_date
date_to_day_week = datetime.to_day_of_week_int(data)
assert(date_to_day_week == expected)
data = default_date_time
date_to_day_week = datetime.to_day_of_week_int(data)
assert(date_to_day_week == expected)
def test_working_day():
data = str_date_default
working_day = datetime.working_day(data)
assert(working_day is True)
data = default_date
working_day = datetime.working_day(data)
assert(working_day is True)
data = '2018-03-17'
working_day = datetime.working_day(data)
assert(working_day is False)
data = dt.datetime.strptime('2018-10-12', '%Y-%m-%d')
working_day = datetime.working_day(data, country='BR')
assert(working_day is False)
def test_now_str():
expected = datetime.to_str(dt.datetime.now())
time_now = datetime.now_str()
assert(time_now == expected)
def test_deltatime_str():
expected = '05.03s'
actual = datetime.deltatime_str(5.03)
assert expected == actual
expected = '18m:35.00s'
actual = datetime.deltatime_str(1115)
assert expected == actual
expected = '03h:05m:15.00s'
actual = datetime.deltatime_str(11115)
assert expected == actual
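    # Worked check of the expected strings: 1115 s = 18*60 + 35 s -> '18m:35.00s',
    # and 11115 s = 3*3600 + 5*60 + 15 s -> '03h:05m:15.00s'.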
def test_timestamp_to_millis():
expected = 1520856487000
data = str_date_time_default
milliseconds = datetime.timestamp_to_millis(data)
assert(milliseconds == expected)
def test_millis_to_timestamp():
expected = default_date_time
data = 1520856487000
timestamp = datetime.millis_to_timestamp(data)
assert(timestamp == expected)
def test_time_to_str():
expected = '12:08:07'
data = default_date_time
time = datetime.time_to_str(data)
assert(time == expected)
def test_elapsed_time_dt():
data = default_date_time
expected = datetime.diff_time(default_date_time,
dt.datetime.now())
elapsed_time = datetime.elapsed_time_dt(data)
assert abs(elapsed_time - expected) <= 5
def test_diff_time():
expected = 388313000
start_date = default_date_time
end_date = dt.datetime.strptime('2018-03-17', '%Y-%m-%d')
diff_time = datetime.diff_time(start_date, end_date)
assert(diff_time == expected)
def test_create_time_slot_in_minute():
df = _default_move_df()
expected = DataFrame({
'lat': {0: 39.984094, 1: 39.984198, 2: 39.984224, 3: 39.984224},
'lon': {0: 116.319236, 1: 116.319322, 2: 116.319402, 3: 116.319402},
'datetime': {
0: Timestamp('2008-10-23 05:44:05'),
1: Timestamp('2008-10-23 05:56:06'),
2: Timestamp('2008-10-23 05:56:11'),
3: Timestamp('2008-10-23 06:10:15')
},
'id': {0: 1, 1: 1, 2: 1, 3: 1},
'time_slot': {0: 22, 1: 23, 2: 23, 3: 24}
})
datetime.create_time_slot_in_minute(df)
    assert_frame_equal(df, expected)
import os
import requests
from time import sleep, time
import pandas as pd
from polygon import RESTClient
from dotenv import load_dotenv, find_dotenv
from FileOps import FileReader, FileWriter
from TimeMachine import TimeTraveller
from Constants import PathFinder
import Constants as C
class MarketData:
def __init__(self):
load_dotenv(find_dotenv('config.env'))
self.writer = FileWriter()
self.reader = FileReader()
self.finder = PathFinder()
self.traveller = TimeTraveller()
self.provider = 'iexcloud'
def get_indexer(self, s1, s2):
return list(s1.intersection(s2))
def try_again(self, func, **kwargs):
retries = (kwargs['retries']
if 'retries' in kwargs
else C.DEFAULT_RETRIES)
delay = (kwargs['delay']
if 'delay' in kwargs
else C.DEFAULT_DELAY)
func_args = {k: v for k, v in kwargs.items() if k not in {
'retries', 'delay'}}
for retry in range(retries):
try:
return func(**func_args)
except Exception as e:
if retry == retries - 1:
raise e
else:
sleep(delay)
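    # Hypothetical usage sketch (symbol/timeframe values are illustrative only):
    #   md = IEXCloud()
    #   divs = md.get_dividends(symbol='AAPL', timeframe='3m', retries=3, delay=2)
    # The provider methods forward **kwargs through try_again, which strips
    # 'retries'/'delay' and passes the remaining arguments to the wrapped function.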
def get_symbols(self):
# get cached list of symbols
symbols_path = self.finder.get_symbols_path()
return list(self.reader.load_csv(symbols_path)[C.SYMBOL])
def get_dividends(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_dividends_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.EX, timeframe)
return filtered
def standardize(self, df, full_mapping,
filename, columns, default):
mapping = {k: v for k, v in full_mapping.items() if k in df}
df = df[list(mapping)].rename(columns=mapping)
time_col, val_cols = columns[0], columns[1:]
if time_col in df and set(val_cols).issubset(df.columns):
df = self.reader.update_df(
filename, df, time_col).sort_values(by=[time_col])
# since time col is pd.datetime,
# consider converting to YYYY-MM-DD str format
for val_col in val_cols:
df[val_col] = df[val_col].apply(
lambda val: float(val) if val else default)
return df
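        # Illustration of the flow above (hypothetical provider columns): a raw
        # frame with ['exDate', 'amount'] is renamed to [C.EX, C.DIV] via the
        # mapping, merged with the cached CSV through reader.update_df, and its
        # value columns are coerced to float with `default` substituted for blanks.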
def standardize_dividends(self, symbol, df):
full_mapping = dict(
zip(
['exDate', 'paymentDate', 'declaredDate', 'amount'],
[C.EX, C.PAY, C.DEC, C.DIV]
)
)
filename = self.finder.get_dividends_path(symbol, self.provider)
return self.standardize(
df,
full_mapping,
filename,
[C.EX, C.DIV],
0
)
def save_dividends(self, **kwargs):
# given a symbol, save its dividend history
symbol = kwargs['symbol']
filename = self.finder.get_dividends_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_dividends(**kwargs), C.EX, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def get_splits(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_splits_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.EX, timeframe)
return filtered
def standardize_splits(self, symbol, df):
full_mapping = dict(
zip(
['exDate', 'paymentDate', 'declaredDate', 'ratio'],
[C.EX, C.PAY, C.DEC, C.RATIO]
)
)
filename = self.finder.get_splits_path(symbol, self.provider)
return self.standardize(
df,
full_mapping,
filename,
[C.EX, C.RATIO],
1
)
def save_splits(self, **kwargs):
# given a symbol, save its splits history
symbol = kwargs['symbol']
filename = self.finder.get_splits_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_splits(**kwargs), C.EX, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_ohlc(self, symbol, df, filename=None):
full_mapping = dict(
zip(
['date', 'open', 'high', 'low', 'close',
'volume', 'average', 'trades'],
[C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE,
C.VOL, C.AVG, C.TRADES]
)
)
filename = filename or self.finder.get_ohlc_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.OPEN, C.HIGH, C.LOW, C.CLOSE],
0
)
for col in [C.VOL, C.TRADES]:
if col in df:
df[col] = df[col].apply(
lambda val: 0 if pd.isnull(val) else int(val))
return df
def get_ohlc(self, symbol, timeframe='max'):
df = self.reader.load_csv(
self.finder.get_ohlc_path(symbol, self.provider))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)
return filtered
def save_ohlc(self, **kwargs):
symbol = kwargs['symbol']
filename = self.finder.get_ohlc_path(symbol, self.provider)
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_ohlc(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def get_social_sentiment(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sentiment_path(symbol))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.POS, C.NEG]]
return filtered
def get_social_volume(self, symbol, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sentiment_path(symbol))
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.VOL, C.DELTA]]
return filtered
def save_social_sentiment(self, **kwargs):
        # given a symbol, save its sentiment data
symbol = kwargs['symbol']
filename = self.finder.get_sentiment_path(symbol)
if os.path.exists(filename):
os.remove(filename)
sen_df = self.reader.update_df(
filename, self.get_social_sentiment(**kwargs), C.TIME)
sen_df = sen_df[self.get_indexer(
{C.TIME, C.POS, C.NEG}, sen_df.columns)]
vol_df = self.reader.update_df(
filename, self.get_social_volume(**kwargs), C.TIME)
vol_df = vol_df[self.get_indexer(
{C.TIME, C.VOL, C.DELTA}, vol_df.columns)]
if sen_df.empty and not vol_df.empty:
df = vol_df
elif not sen_df.empty and vol_df.empty:
df = sen_df
elif not sen_df.empty and not vol_df.empty:
df = sen_df.merge(vol_df, how="outer", on=C.TIME)
else:
return
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_sentiment(self, symbol, df):
full_mapping = dict(
zip(
['timestamp', 'bullish', 'bearish'],
[C.TIME, C.POS, C.NEG]
)
)
filename = self.finder.get_sentiment_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.POS, C.NEG],
0
)
return df[self.get_indexer({C.TIME, C.POS, C.NEG}, df.columns)]
def standardize_volume(self, symbol, df):
full_mapping = dict(
zip(
['timestamp', 'volume_score', 'volume_change'],
[C.TIME, C.VOL, C.DELTA]
)
)
filename = self.finder.get_sentiment_path(symbol, self.provider)
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.VOL, C.DELTA],
0
)
return df[self.get_indexer({C.TIME, C.VOL, C.DELTA}, df.columns)]
def get_intraday(self, symbol, min=1, timeframe='max', extra_hrs=False):
# implement way to transform 1 min dataset to 5 min data
# or 30 or 60 should be flexible soln
# implement way to only get market hours
# given a symbol, return a cached dataframe
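        # A possible resampling approach for the TODO above (sketch only, not
        # wired in; the aggregation mapping is an assumption): given a 1-min
        # OHLC frame keyed by C.TIME, pandas can build N-minute bars with
        #   bars = (df.set_index(C.TIME)
        #             .resample(f'{min}T')
        #             .agg({C.OPEN: 'first', C.HIGH: 'max', C.LOW: 'min',
        #                   C.CLOSE: 'last', C.VOL: 'sum'})
        #             .dropna(how='all')
        #             .reset_index())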
dates = self.traveller.dates_in_range(timeframe)
for date in dates:
df = self.reader.load_csv(
self.finder.get_intraday_path(symbol, date, self.provider))
yield self.reader.data_in_timeframe(df, C.TIME, timeframe)
def save_intraday(self, **kwargs):
symbol = kwargs['symbol']
dfs = self.get_intraday(**kwargs)
filenames = []
for df in dfs:
date = df[C.TIME].iloc[0].strftime(C.DATE_FMT)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
if os.path.exists(filename):
os.remove(filename)
save_fmt = f'{C.DATE_FMT} {C.TIME_FMT}'
df = self.reader.update_df(
filename, df, C.TIME, save_fmt)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
filenames.append(filename)
return filenames
def get_unemployment_rate(self, timeframe='max'):
# given a timeframe, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_unemployment_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)
return filtered
def standardize_unemployment(self, df):
full_mapping = dict(
zip(
['time', 'value'],
[C.TIME, C.UN_RATE]
)
)
filename = self.finder.get_unemployment_path()
return self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.UN_RATE],
0
)
def save_unemployment_rate(self, **kwargs):
        # save the unemployment rate history
filename = self.finder.get_unemployment_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_unemployment_rate(**kwargs), C.TIME, '%Y-%m')
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_s2f_ratio(self, df):
full_mapping = dict(
zip(
['t', 'o.daysTillHalving', 'o.ratio'],
[C.TIME, C.HALVING, C.RATIO]
)
)
filename = self.finder.get_s2f_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.HALVING, C.RATIO],
0
)
return df[self.get_indexer({C.TIME, C.HALVING, C.RATIO}, df.columns)]
def get_s2f_ratio(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_s2f_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.HALVING, C.RATIO]]
return filtered
def save_s2f_ratio(self, **kwargs):
        # save the stock-to-flow (s2f) ratio data
filename = self.finder.get_s2f_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_s2f_ratio(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_diff_ribbon(self, df):
full_mapping = dict(
zip(
['t', 'o.ma9', 'o.ma14', 'o.ma25', 'o.ma40',
'o.ma60', 'o.ma90', 'o.ma128', 'o.ma200'],
[C.TIME] + C.MAs
)
)
filename = self.finder.get_diff_ribbon_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME] + C.MAs,
0
)
return df[self.get_indexer(set([C.TIME] + C.MAs), df.columns)]
def get_diff_ribbon(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_diff_ribbon_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME] + C.MAs]
return filtered
def save_diff_ribbon(self, **kwargs):
        # save the difficulty ribbon data
filename = self.finder.get_diff_ribbon_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_diff_ribbon(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
def standardize_sopr(self, df):
full_mapping = dict(
zip(
['t', 'v'],
[C.TIME, C.SOPR]
)
)
        filename = self.finder.get_sopr_path()
df = self.standardize(
df,
full_mapping,
filename,
[C.TIME, C.SOPR],
1
)
return df[self.get_indexer({C.TIME, C.SOPR}, df.columns)]
def get_sopr(self, timeframe='max'):
# given a symbol, return a cached dataframe
df = self.reader.load_csv(
self.finder.get_sopr_path())
filtered = self.reader.data_in_timeframe(df, C.TIME, timeframe)[
[C.TIME, C.SOPR]]
return filtered
def save_sopr(self, **kwargs):
        # save the SOPR data
filename = self.finder.get_sopr_path()
if os.path.exists(filename):
os.remove(filename)
df = self.reader.update_df(
filename, self.get_sopr(**kwargs), C.TIME, C.DATE_FMT)
self.writer.update_csv(filename, df)
if os.path.exists(filename):
return filename
# def handle_request(self, url, err_msg):
class IEXCloud(MarketData):
def __init__(self, test=False):
super().__init__()
self.version = 'v1'
self.provider = 'iexcloud'
if test:
self.base = 'https://sandbox.iexapis.com'
self.token = os.environ['IEXCLOUD_SANDBOX']
else:
self.base = 'https://cloud.iexapis.com'
self.token = os.environ['IEXCLOUD']
def get_dividends(self, **kwargs):
# given a symbol, return the dividend history
def _get_dividends(symbol, timeframe='3m'):
category = 'stock'
dataset = 'dividends'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = [datum for datum in response.json() if datum['flag']
== 'Cash' and datum['currency'] == 'USD']
else:
raise Exception(
f'Invalid response from IEX for {symbol} dividends.')
if data == []:
return empty
df = self.standardize_dividends(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_dividends, **kwargs)
def get_splits(self, **kwargs):
# given a symbol, return the stock splits
def _get_splits(symbol, timeframe='3m'):
category = 'stock'
dataset = 'splits'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} splits.')
if data == []:
return empty
df = self.standardize_splits(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_splits, **kwargs)
def get_ohlc(self, **kwargs):
def _get_prev_ohlc(symbol):
category = 'stock'
dataset = 'previous'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} OHLC.')
if data == []:
return empty
df = pd.DataFrame([data])
return self.standardize_ohlc(symbol, df)
def _get_ohlc(symbol, timeframe='1m'):
if timeframe == '1d':
return _get_prev_ohlc(symbol)
category = 'stock'
dataset = 'chart'
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
timeframe
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
empty = pd.DataFrame()
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} OHLC.')
if data == []:
return empty
df = self.standardize_ohlc(symbol, pd.DataFrame(data))
return self.reader.data_in_timeframe(df, C.TIME, timeframe)
return self.try_again(func=_get_ohlc, **kwargs)
# extra_hrs should be True if possible
def get_intraday(self, **kwargs):
def _get_intraday(symbol, min=1, timeframe='max', extra_hrs=True):
# pass min directly into hist prices endpoint
# to get 1, 5, 30, 60 min granularity if possible
# and get extra hrs if possible
category = 'stock'
dataset = 'chart'
dates = self.traveller.dates_in_range(timeframe)
if dates == []:
raise Exception(f'No dates in timeframe: {timeframe}.')
for date in dates:
parts = [
self.base,
self.version,
category,
symbol.lower(),
dataset,
'date',
date.replace('-', '')
]
url = '/'.join(parts)
params = {'token': self.token}
response = requests.get(url, params=params)
if response.ok:
data = response.json()
else:
raise Exception(
f'Invalid response from IEX for {symbol} intraday.')
if data == []:
continue
df = pd.DataFrame(data)
df['date'] = pd.to_datetime(df['date'] + ' ' + df['minute'])
# if all values are na except time, then skip
num_data_rows = len(
df.drop(columns=['date', 'minute']).dropna(how='all'))
if (num_data_rows == 0):
continue
res_cols = ['date', 'minute', 'marketOpen', 'marketHigh',
'marketLow', 'marketClose', 'marketVolume',
'marketAverage', 'marketNumberOfTrades']
std_cols = ['date', 'minute', 'open', 'high', 'low',
'close', 'volume', 'average', 'trades']
columns = dict(zip(res_cols, std_cols))
df = df[res_cols].rename(columns=columns)
df.drop(columns='minute', inplace=True)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
df = self.standardize_ohlc(symbol, df, filename)
yield df
return self.try_again(func=_get_intraday, **kwargs)
class Polygon(MarketData):
def __init__(self, token=os.environ.get('POLYGON'), free=True):
super().__init__()
self.client = RESTClient(token)
self.provider = 'polygon'
self.free = free
def obey_free_limit(self):
if self.free and hasattr(self, 'last_api_call_time'):
time_since_last_call = time() - self.last_api_call_time
delay = C.POLY_FREE_DELAY - time_since_last_call
if delay > 0:
sleep(delay)
def log_api_call_time(self):
self.last_api_call_time = time()
def get_dividends(self, **kwargs):
def _get_dividends(symbol, timeframe='max'):
self.obey_free_limit()
try:
response = self.client.reference_stock_dividends(symbol)
except Exception as e:
raise e
finally:
self.log_api_call_time()
raw = pd.DataFrame(response.results)
df = self.standardize_dividends(symbol, raw)
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_dividends, **kwargs)
def get_splits(self, **kwargs):
def _get_splits(symbol, timeframe='max'):
self.obey_free_limit()
try:
response = self.client.reference_stock_splits(symbol)
except Exception as e:
raise e
finally:
self.log_api_call_time()
raw = pd.DataFrame(response.results)
df = self.standardize_splits(symbol, raw)
return self.reader.data_in_timeframe(df, C.EX, timeframe)
return self.try_again(func=_get_splits, **kwargs)
def get_ohlc(self, **kwargs):
def _get_ohlc(symbol, timeframe='max'):
is_crypto = symbol.find('X%3A') == 0
formatted_start, formatted_end = self.traveller.convert_dates(
timeframe)
self.obey_free_limit()
try:
response = self.client.stocks_equities_aggregates(
symbol, 1, 'day',
from_=formatted_start, to=formatted_end, unadjusted=False
)
except Exception as e:
raise e
finally:
self.log_api_call_time()
raw = response.results
columns = {'t': 'date', 'o': 'open', 'h': 'high',
'l': 'low', 'c': 'close', 'v': 'volume',
'vw': 'average', 'n': 'trades'}
df = pd.DataFrame(raw).rename(columns=columns)
if is_crypto:
df['date'] = pd.to_datetime(
df['date'], unit='ms')
else:
df['date'] = pd.to_datetime(
df['date'], unit='ms').dt.tz_localize(
'UTC').dt.tz_convert(
C.TZ).dt.tz_localize(None)
df = self.standardize_ohlc(symbol, df)
return self.reader.data_in_timeframe(df, C.TIME, timeframe)
return self.try_again(func=_get_ohlc, **kwargs)
def get_intraday(self, **kwargs):
def _get_intraday(symbol, min=1, timeframe='max', extra_hrs=True):
# pass min directly into stock_aggs function as multiplier
is_crypto = symbol.find('X%3A') == 0
dates = self.traveller.dates_in_range(timeframe)
if dates == []:
raise Exception(f'No dates in timeframe: {timeframe}.')
for idx, date in enumerate(dates):
self.obey_free_limit()
try:
response = self.client.stocks_equities_aggregates(
symbol, min, 'minute', from_=date, to=date,
unadjusted=False
)
except Exception as e:
raise e
finally:
self.log_api_call_time()
if hasattr(response, 'results'):
response = response.results
else:
continue
columns = {'t': 'date', 'o': 'open', 'h': 'high',
'l': 'low', 'c': 'close', 'v': 'volume',
'vw': 'average', 'n': 'trades'}
df = pd.DataFrame(response).rename(columns=columns)
if is_crypto:
df['date'] = pd.to_datetime(
df['date'], unit='ms')
else:
df['date'] = pd.to_datetime(
df['date'], unit='ms').dt.tz_localize(
'UTC').dt.tz_convert(
C.TZ).dt.tz_localize(None)
filename = self.finder.get_intraday_path(
symbol, date, self.provider)
df = self.standardize_ohlc(symbol, df, filename)
df = df[df[C.TIME].dt.strftime(C.DATE_FMT) == date]
yield df
return self.try_again(func=_get_intraday, **kwargs)
# newShares = oldShares / ratio
class StockTwits(MarketData):
def __init__(self):
super().__init__()
self.base = 'https://api.stocktwits.com'
self.version = '2'
self.token = os.environ.get('STOCKTWITS')
self.provider = 'stocktwits'
def get_social_volume(self, **kwargs):
def _get_social_volume(symbol, timeframe='max'):
parts = [
self.base,
'api',
self.version,
'symbols',
symbol,
'volume.json'
]
url = '/'.join(parts)
params = {'access_token': self.token}
vol_res = requests.get(url, params=params)
json_res = vol_res.json()
empty = pd.DataFrame()
if vol_res.ok:
vol_data = json_res['data']
            else:
                # raise on any bad response so vol_data is never left undefined
                errors = ''
                if 'errors' in json_res:
                    errors = '\n'.join([error['message']
                                        for error in json_res['errors']])
                raise Exception(
                    f'Invalid response from Stocktwits for {symbol}\n{errors}')
if vol_data == []:
return empty
vol_data.sort(key=lambda x: x['timestamp'])
vol_data.pop()
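            # drop the last entry, presumably the still-accumulating current
            # bucket (assumption; the Stocktwits endpoint is not documented here)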
df = pd.DataFrame(vol_data)
std = self.standardize_volume(symbol, df)
if timeframe == '1d':
filtered = std.tail(1)
else:
                filtered = self.reader.data_in_timeframe(
                    std, C.TIME, timeframe)[[C.TIME, C.VOL, C.DELTA]]
return filtered
return self.try_again(func=_get_social_volume, **kwargs)
def get_social_sentiment(self, **kwargs):
def _get_social_sentiment(symbol, timeframe='max'):
parts = [
self.base,
'api',
self.version,
'symbols',
symbol,
'sentiment.json'
]
url = '/'.join(parts)
params = {'access_token': self.token}
sen_res = requests.get(url, params=params)
json_res = sen_res.json()
            empty = pd.DataFrame()
"""
Tests for zipline/utils/pandas_utils.py
"""
from unittest import skipIf
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements,
new_pandas,
skip_pipeline_new_pandas,
)
class TestNearestUnequalElements(ZiplineTestCase):
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements(self, tz):
dts = pd.to_datetime(
['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-30', None, '2014-01-01'),
('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, '2014-01-05'),
('2014-01-02', '2014-01-01', '2014-01-05'),
('2014-01-03', '2014-01-01', '2014-01-05'),
('2014-01-04', '2014-01-01', '2014-01-05'),
('2014-01-05', '2014-01-01', '2014-01-06'),
('2014-01-06', '2014-01-05', '2014-01-09'),
('2014-01-07', '2014-01-06', '2014-01-09'),
('2014-01-08', '2014-01-06', '2014-01-09'),
('2014-01-09', '2014-01-06', None),
('2014-01-10', '2014-01-09', None),
('2014-01-11', '2014-01-09', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements_short_dts(self, tz):
# Length 1.
dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, None),
('2014-01-02', '2014-01-01', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
# Length 0
dts = pd.to_datetime([]).tz_localize(tz)
for dt, before, after in (('2013-12-31', None, None),
('2014-01-01', None, None),
('2014-01-02', None, None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
def test_nearest_unequal_bad_input(self):
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
                pd.to_datetime(['2014', '2014']),
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
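# Example: stripID('https://www.kklee.co/products/abc123', '/products/')
# returns 'abc123' (everything after the first occurrence of the marker).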
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
                make_id = parse.urlsplit(page_link)
                page_id = make_id.path
                # str.lstrip() strips a character set rather than a prefix and can
                # eat leading characters of the id, so use the stripID helper instead
                page_id = stripID(page_id, "/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # temporary buffer for the current page; merged into dfAll on page change
    dfAll = pd.DataFrame()  # accumulates all scraped rows
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics)
def get_ml_op(
start_date : str,
pre_processed_dataset : Input[Dataset],
bros_dataset : Input[Dataset],
dic_model_dataset : Output[Dataset],
dic_df_pred_dataset : Output[Dataset],
prediction_result_dataset : Output[Dataset]
) -> str :
DESC = "model m19-12 / Classifier / change_p1_over1 / no bros / Incl top30 / Incl. KODEX ETN / +-25% Training / +- 25% Prediction / mCap 500억 이상, 5조 이하"
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from catboost import Pool
from catboost import CatBoostClassifier, CatBoostRegressor
# Load Dataset
df_preP = pd.read_pickle(pre_processed_dataset.path)
df_bros = pd.read_pickle(bros_dataset.path)
# Dates things ...
l_dates = df_preP.date.unique().tolist()
print(f'df_preP start from {l_dates[0]} end at {l_dates[-1]} shape : {df_preP.shape}')
idx_start = l_dates.index(start_date)
print(f'index of start date : {idx_start}')
period = int(l_dates.__len__() - idx_start)
# get Univ df
def get_15pct_univ_in_period(df, l_dates): # input dataframe : top30s in the period
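        # Note: despite the "15pct" in the name, the change filter below keeps
        # rows within +/-25% daily change, and mkt_cap is restricted to the
        # 500-50,000 range (50bn-5trn KRW per the DESC string above).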
print(f'length of l_date : {l_dates.__len__()}')
df_univ = pd.DataFrame()
for date in l_dates :
df_of_the_day = df[df.date == date]
df_of_the_day = df_of_the_day[(df_of_the_day.mkt_cap > 500) & (df_of_the_day.mkt_cap < 50000)]
df_15pct_of_the_day = df_of_the_day[(df_of_the_day.change >= -0.25) & (df_of_the_day.change <= 0.25)]
# l_codes = df_15pct_of_the_day.code.unique().tolist()
# df_bros_in_date = df_bros[df_bros.date == date]
# l_bros_of_top30s = df_bros_in_date[\
# df_bros_in_date.source.isin(l_codes)].target.unique().tolist()
# df_bros_of_top30 = df_of_the_day[df_of_the_day.code.isin(l_bros_of_top30s)]
df_ = df_15pct_of_the_day #.append(df_bros_of_top30)
df_.drop_duplicates(subset=['code', 'date'], inplace=True)
df_univ = df_univ.append(df_)
return df_univ
# Set Target and Feats
# target_col = ['target_close_over_10']
target_col = ['change_p1_over1']
cols_indicator = [ 'code', 'name', 'date', ]
features = [
# 'code',
# 'name',
# 'date',
# 'rank',
'mkt_cap',
# 'mkt_cap_cat',
'in_top30',
# 'rank_mean_10',
# 'rank_mean_5',
'in_top_30_5',
'in_top_30_10',
'in_top_30_20',
# 'up_bro_ratio_20',
# 'up_bro_ratio_40',
# 'up_bro_ratio_60',
# 'up_bro_ratio_90',
# 'up_bro_ratio_120',
# 'n_bro_20',
# 'n_bro_40',
# 'n_bro_60',
# 'n_bro_90',
# 'n_bro_120',
# 'all_bro_rtrn_mean_20',
# 'all_bro_rtrn_mean_40',
# 'all_bro_rtrn_mean_60',
# 'all_bro_rtrn_mean_90',
# 'all_bro_rtrn_mean_120',
# 'up_bro_rtrn_mean_20',
# 'up_bro_rtrn_mean_40',
# 'up_bro_rtrn_mean_60',
# 'up_bro_rtrn_mean_90',
# 'up_bro_rtrn_mean_120',
# 'all_bro_rtrn_mean_ystd_20',
# 'all_bro_rtrn_mean_ystd_40',
# 'all_bro_rtrn_mean_ystd_60',
# 'all_bro_rtrn_mean_ystd_90',
# 'all_bro_rtrn_mean_ystd_120',
# 'bro_up_ratio_ystd_20',
# 'bro_up_ratio_ystd_40',
# 'bro_up_ratio_ystd_60',
# 'bro_up_ratio_ystd_90',
# 'bro_up_ratio_ystd_120',
# 'up_bro_rtrn_mean_ystd_20',
# 'up_bro_rtrn_mean_ystd_40',
# 'up_bro_rtrn_mean_ystd_60',
# 'up_bro_rtrn_mean_ystd_90',
# 'up_bro_rtrn_mean_ystd_120',
# 'index',
# 'open_x',
# 'high_x',
# 'low_x',
# 'close_x',
# 'volume_x',
# 'change_x',
# 'high_p1',
# 'high_p2',
# 'high_p3',
# 'close_p1',
# 'close_p2',
# 'close_p3',
# 'change_p1',
# 'change_p2',
# 'change_p3',
# 'change_p1_over5',
# 'change_p2_over5',
# 'change_p3_over5',
# 'change_p1_over10',
# 'change_p2_over10',
# 'change_p3_over10',
# 'close_high_1',
# 'close_high_2',
# 'close_high_3',
# 'close_high_1_over10',
# 'close_high_2_over10',
# 'close_high_3_over10',
# 'close_high_1_over5',
# 'close_high_2_over5',
# 'close_high_3_over5',
# 'open_y',
# 'high_y',
# 'low_y',
# 'close_y',
# 'volume_y',
# 'change_y',
# 'macd',
# 'boll_ub',
# 'boll_lb',
# 'rsi_30',
# 'dx_30',
# 'close_30_sma',
# 'close_60_sma',
'daily_return',
'return_lag_1',
'return_lag_2',
'return_lag_3',
'bb_u_ratio',
'bb_l_ratio',
# 'max_scale_MACD',
'volume_change_wrt_10max',
'volume_change_wrt_5max',
'volume_change_wrt_20max',
'volume_change_wrt_10mean',
'volume_change_wrt_5mean',
'volume_change_wrt_20mean',
'close_ratio_wrt_10max',
'close_ratio_wrt_10min',
'oh_ratio',
'oc_ratio',
'ol_ratio',
'ch_ratio',
# 'Symbol',
# 'DesignationDate',
# 'admin_stock',
# 'dayofweek'
]
# Model training and prediction
    df_pred_all = pd.DataFrame()
# import necessary libraries
import pandas as pd
import os
import matplotlib.pyplot as plt
from itertools import combinations
from collections import Counter
def get_city(address):
return address.split(',')[1]
def get_state(address):
return address.split(',')[2].split(' ')[1]
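# Example (hypothetical address value): for '917 1st St, Dallas, TX 75001',
# get_city() returns ' Dallas' (note the leading space) and get_state() returns 'TX'.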
# plt.style.use('fivethirtyeight')
# Merging 12 months of sales data into a single file
data1 = pd.read_csv('D:/Phyton Code/Contoh dari Github/Pandas-Data-Science-\
Tasks-master/SalesAnalysis/Sales_Data/Sales_April_2019.csv')
all_data = pd.DataFrame()
files = [file for file in os.listdir('D:/Phyton Code/Contoh dari Github/\
Pandas-Data-Science-Tasks-master/SalesAnalysis/Sales_Data')]
for file in files:
temp = pd.read_csv('D:/Phyton Code/Contoh dari Github/Pandas-Data-Science-\
Tasks-master/SalesAnalysis/Sales_Data/' + file)
all_data = pd.concat([all_data, temp])
# Clean up Nan Values
all_data.isnull().sum()
all_data.dropna(axis=0, how='any', inplace=True)
all_data = all_data[all_data['Order Date'].str[0:2] != 'Or']
# Add an additional column
all_data['Month'] = all_data['Order Date'].str[0:2]
all_data['Month'] = all_data['Month'].astype('int32')
all_data['Quantity Ordered'] = pd.to_numeric(all_data['Quantity Ordered'])
all_data['Price Each'] = pd.to_numeric(all_data['Price Each'])
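# Example (hypothetical row): an Order Date of '04/19/19 08:46' gives
# Month '04', which the int cast above turns into 4.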
"""
This script creates a boolean mask based on rules
1. is it boreal forest zone
2. In 2000, was there sufficent forest
"""
#==============================================================================
__title__ = "FRI calculator for the other datasets"
__author__ = "<NAME>"
__version__ = "v1.0(21.08.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from netCDF4 import Dataset, num2date, date2num
from scipy import stats
# import rasterio
import xarray as xr
from dask.diagnostics import ProgressBar
from numba import jit
import bottleneck as bn
import scipy as sp
from scipy import stats
import glob
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# import regionmask as rm
# import itertools
# Import debugging packages
import ipdb
# from rasterio.warp import transform
from shapely.geometry import Polygon
# import geopandas as gpd
# from rasterio import features
# from affine import Affine
# +++++ Import my packages +++++
import myfunctions.corefunctions as cf
# import MyModules.PlotFunctions as pf
# import MyModules.NetCDFFunctions as ncf
#==============================================================================
def main():
# ========== Setup the paths ==========
TCF = 10
# TCF = 50
dpath, chunksize = syspath()
data = datasets(dpath, chunksize, TCF=TCF)
# ========== select and analysis scale ==========
mwbox = [1]#, 2, 5]#, 10] #in decimal degrees
maskds = "esacci"
maskforce = False # Added to allow skiping of the mask
for dsn in data:
print(dsn)
# ========== Set up the filename and global attributes =========
if dsn.startswith("HANSEN"):
ppath = dpath + "/BurntArea/HANSEN/FRI/"
force = True
else:
ppath = dpath + "/BurntArea/%s/FRI/" % dsn
force = False
cf.pymkdir(ppath)
# ========== Get the dataset =========
mask = landseamaks(data, dsn, dpath, maskforce )
        # ========== Calculate the annual burn frequency =========
ds_ann = ANNcalculator(data, dsn, mask, force, ppath, dpath, chunksize, TCF)
# force = True
# breakpoint()
# ========== work out the FRI ==========
FRIcal(ds_ann, mask, dsn, force, ppath, dpath, mwbox, data, chunksize, TCF)
# force = False
print(dsn, " Complete at:", pd.Timestamp.now())
ipdb.set_trace()
#==============================================================================
def FRIcal(ds_ann, mask, dsn, force, ppath, dpath, mwbox, data, chunksize, TCF):
""""""
""" Function to caluclate the FRI at different resolutions """
# ========== Add a loading string for forest cover in Hansen Datasets ==========
if dsn.startswith("HANSEN") and (TCF > 0.):
tcfs = "_%dperTC" % np.round(TCF)
else:
tcfs = ""
# ========== Setup a working path ==========
tpath = ppath+"tmp/"
# ========== work out the ration ==========
pix = abs(np.unique(np.diff(ds_ann.latitude.values))[0])
# ds_ann = ds_ann.chunk({"latitude":chunksize, "longitude":-1})
print(f"Loading annual data into ram at: {pd.Timestamp.now()}")
ds_ann.persist()
# ========== Build a cleanup list ==========
cleanup = []
for mwb in mwbox:
print("Starting %s %d degree moving window at:" %(dsn, mwb), pd.Timestamp.now())
fname = "%s%s_annual_burns_MW_%ddegreeBox.nc" % (dsn, tcfs, mwb)
tname = "%s%s_annual_burns_lonMW_%ddegreeBox.nc" % (dsn, tcfs, mwb)
tname2 = "%s%s_annual_burns_latMW_%ddegreeBox.nc" % (dsn, tcfs, mwb)
tMnme = "%s%s_annual_burns_lonMW_tmpmask_%ddegreeBox.nc" % (dsn, tcfs, mwb)
# ========== Check if a valid file already exists ==========
if os.path.isfile(ppath+fname) and not force:
cleanup.append(ppath+tname)
cleanup.append(tpath+tMnme)
continue
# ===== get the ratio =====
SF = np.round(mwb /pix).astype(int)
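        # SF is the moving-window width in pixels: the box size in degrees
        # divided by the pixel size in degrees.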
# # ===== Create a masksum =====
# warn.warn("I need to reimplement the mask here:")
def _maskmaker(SF, mask, tpath, tMnme, dsn):
mask_sum = mask.fillna(0).rolling({"longitude":SF}, center = True, min_periods=1).sum(skipna=False)
print("Mask Role 1:", pd.Timestamp.now())
mask_sum = mask_sum.rolling({"latitude":SF}, center = True, min_periods=1).sum(skipna=False)
mask_sum = (mask_sum > ((SF/2)**2)).astype("int16")
print("Mask Role 2:", pd.Timestamp.now())
if dsn.startswith("HANSEN"):
mask_sum = mask_sum.sortby("latitude", ascending=False)
mask_sum = mask_sum.sel(dict(latitude=slice(70.0, 40.0), longitude=slice(-10.0, 180.0)))
mask_sum = tempNCmaker(mask_sum, tpath, tMnme, "landwater",
None, readchunks={'longitude': 500}, skip=False)
mask_sum.close()
mask_sum = None
if not os.path.isfile(tpath + tMnme):
_maskmaker(SF, mask, tpath, tMnme, dsn)
# sys.exit()
print("Mask reload:", pd.Timestamp.now())
mask_sum = xr.open_dataset(tpath+tMnme)
# continue
# This is so i can count the number of values that are valid in each location
# breakpoint()
# ===== Calculate the Moving window on dim 1 =====
if dsn.startswith("HANSEN") and (TCF > 0.):
# Hansen use TCF weights as it cannot measure loss in non forest
# ========== Read in the weights ==========
fn_wei = dpath + f"/BurntArea/HANSEN/lossyear/Hansen_GFC-2018-v1.6_weights_SIBERIAatesacci{tcfs}.nc"
weights = xr.open_dataset(fn_wei)#.rename({"lossyear":"AnBF"})
# ========== Do the MV on the weights ==========
print(f"Starting the MW for the weight calculation at {pd.Timestamp.now()}")
ds_ww = weights.rolling({"longitude":SF},center = True, min_periods=1).sum(skipna=False)
ds_ww = ds_ww.rolling({"latitude":SF},center = True, min_periods=1).sum(skipna=False).compute()
# ========== Multiply the raw weights by the ds_ann then roll the ds_ann ==========
print(f"Starting the MW for {dsn} at {pd.Timestamp.now()}")
dsan_lons = (ds_ann.fillna(0)*weights.lossyear.values).rolling(
{"longitude":SF}, center = True, min_periods=1).sum(skipna=False)
ds_out = dsan_lons.rolling({"latitude":SF}, center = True, min_periods=1).sum(skipna=False)
# ========== Divide the rolled MW rolled weights ==========
ds_out["AnBF"] /= ds_ww.lossyear.values
# breakpoint()
else:
if dsn == 'GFED':
print(f"Loading {dsn} data into ram at", pd.Timestamp.now())
with ProgressBar():
dsan_lons = ds_ann.rolling({"longitude":SF}, center = True, min_periods=1).mean().compute()
else:
dsan_lons = ds_ann.rolling({"longitude":SF}, center = True, min_periods=1).mean()
dsan_lons = tempNCmaker(
dsan_lons, tpath, tname, "AnBF",
{'latitude': chunksize}, readchunks={'longitude': chunksize}, skip=False)
print(f"Loading temp rolled dataset data into ram at: {pd.Timestamp.now()}")
if dsn == 'GFED':
with ProgressBar():
dsan_lons = dsan_lons.compute()
else:
dsan_lons.persist()
# ===== Calculate the Moving window in the other dim =====
ds_out = dsan_lons.rolling({"latitude":SF}, center = True, min_periods=1).mean()
# ========== Mask out bad pixels ==========
# ===== Deal with the locations with no fire history =====
ds_out = ds_out.where(ds_out > 0, 0.000001)
ds_out = ds_out.where(mask["landwater"].values == 1) #Mask out water
ds_out = ds_out.where(mask_sum["landwater"].values == 1) #Mask out points that lack data
# ===== Calculate a FRI =====
ds_out["FRI"] = 1.0/ds_out["AnBF"]
# ===== add some attrs =====
ds_out.attrs = ds_ann.attrs
GlobalAttributes(ds_out, dsn, fnameout=ppath+fname)
# ===== Save the file out =====
ds_out = tempNCmaker(
ds_out, ppath, fname, ["AnBF", "FRI"], {'longitude': chunksize},
readchunks=data[dsn]["chunks"], skip=False, name="%s %d degree MW" % (dsn, mwb))
# ipdb.set_trace()
cleanup.append(tpath+tname)
cleanup.append(tpath+tMnme)
print("Starting excess file cleanup at:", pd.Timestamp.now())
for file in cleanup:
if os.path.isfile(file):
os.remove(file)
def ANNcalculator(data, dsn, mask, force, ppath, dpath, chunksize, TCF):
""" Function to calculate the FRI
args
data: Ordered dict
dsn: str of the dataset name
ds: XR dataset
"""
# ========== Add a loading string for forest cover in Hansen Datasets ==========
if dsn.startswith("HANSEN") and (TCF > 0.):
tcfs = "_%dperTC" % np.round(TCF)
else:
tcfs = ""
# ========== Setup a working path ==========
tpath = ppath+"tmp/"
cf.pymkdir(tpath)
# ======================================================
# ========== Build the annual mean burnt area ==========
# ======================================================
# ========== setup the temp filnames ==========
tname = "%s%s_annual_burns.nc" % (dsn, tcfs)
if not os.path.isfile(tpath+tname) or force:
# breakpoint()
# ========== load the data ==========
ds = dsloader(data, dsn, ppath, dpath, force)
if dsn == 'GFED':
print(f"Loading {dsn} data into ram at", pd.Timestamp.now())
with ProgressBar():
ds = ds.compute()
# ========== calculate the sum ==========
dates = datefixer(data[dsn]["end"], 12, 31)
ds_flat = ds.mean(dim="time", keep_attrs=True).expand_dims({"time":dates["CFTime"]}).rename({data[dsn]["var"]:"AnBF"})
ds_flat.time.attrs["calendar"] = dates["calendar"]
ds_flat.time.attrs["units"] = dates["units"]
# ========== Write out the file ==========
attrs = GlobalAttributes(ds_flat, dsn, fnameout=ppath+tname)
# ========== add some form of mask here ==========
try:
ds_flat = ds_flat.where(mask["landwater"].values == 1).astype("float32")
except Exception as e:
# ========== Fix the Hansen mask ==========
print("starting mask reprocessing at:", pd.Timestamp.now())
mask = mask.sortby("latitude", ascending=False)
mask = mask.sel(dict(latitude=slice(70.0, 40.0), longitude=slice(-10.0, 180.0)))
ds_flat = ds_flat.where(mask["landwater"].values == 1).astype("float32")
# ========== create a date ==========
dates = datefixer(data[dsn]["end"], 12, 31)
# ===== fix the time =====
ds_flat["time"] = dates["CFTime"]
ds_flat.time.attrs["calendar"] = dates["calendar"]
ds_flat.time.attrs["units"] = dates["units"]
# ===== fix the attrs =====
ds_flat.attrs = ds.attrs
GlobalAttributes(ds_flat, dsn, fnameout=ppath+tname)
ds_flat = tempNCmaker(
ds_flat, tpath, tname, "AnBF",
data[dsn]["chunks"], skip=False, name="%s annual BA" % dsn)
# breakpoint()
else:
print("Opening existing Annual Burn Fraction file")
ds_flat = xr.open_dataset(tpath+tname)#, chunks=data[dsn]["chunks"])
return ds_flat
#==============================================================================
#==============================================================================
def tempNCmaker(ds, tmppath, tmpname, vname, writechunks, readchunks={'longitude': 1000}, skip=False, name="tmp"):
""" Function to save out a tempary netcdf """
cf.pymkdir(tmppath)
fntmp = tmppath + tmpname
if type(vname) == list:
encoding = OrderedDict()
for vn in vname:
encoding[vn] = {'shuffle':True,'zlib':True,'complevel':5}
else:
encoding = ({vname:{'shuffle':True,'zlib':True,'complevel':5}})
if not all([skip, os.path.isfile(fntmp)]):
delayed_obj = ds.to_netcdf(fntmp,
format = 'NETCDF4',
encoding = encoding,
unlimited_dims = ["time"],
compute=False)
print("Starting write of %s data at" % name, pd.Timestamp.now())
with ProgressBar():
results = delayed_obj.compute()
dsout = xr.open_dataset(fntmp, chunks=readchunks)
return dsout
def dsloader(data, dsn, ppath, dpath, force):
"""Takes in infomation about datasets and loads a file
args
data: Ordered dict
dsn: str of the dataset name
returns:
ds
"""
# ========== check if the name needs to be globbed ==========
if "*" in data[dsn]["fname"]:
# ========== get all the file names ==========
fnames = glob.glob(data[dsn]["fname"])
lat = [] #a test to make sure the sizes are correct
for fn in fnames:
dsin = xr.open_dataset(fn, chunks=data[dsn]["chunks"])
lat.append(dsin[data[dsn]["var"]].shape[1] )
# ========== open the dataset ==========
ds = xr.open_mfdataset(fnames, combine='nested', concat_dim="time",chunks=(data[dsn]["chunks"]))
# ========== Add a simple dataset check ==========
if not np.unique(lat).shape[0] == 1:
warn.warn("the datasets have missmatched size, going interactive")
ipdb.set_trace()
sys.exit()
else:
ds = xr.open_dataset(data[dsn]["fname"], chunks=data[dsn]["chunks"])
return ds
def landseamaks(data, dsn, dpath, force, chunks=None, maskds = "esacci"):
# ========== create the mask fielname ==========
# masknm = ppath + "%s_landseamask.nc" % dsn
if dsn.startswith("HANSEN"):
print("starting mask reprocessing at:", pd.Timestamp.now())
masknm = dpath+"/masks/landwater/%s_landwater.nc" % maskds
raw_mask = xr.open_dataset(masknm, chunks=chunks)
raw_mask = raw_mask.sortby("latitude", ascending=False)
raw_mask = raw_mask.sel(dict(latitude=slice(70.0, 40.0), longitude=slice(-10.0, 180.0)))
else:
masknm = dpath+"/masks/landwater/%s_landwater.nc" % dsn
raw_mask = xr.open_dataset(masknm, chunks=chunks)
# if dsn == "esacci":
# chunks = data[dsn]["chunks"]
# raw_mask = xr.open_dataset(masknm, chunks=chunks)
return raw_mask
def datasets(dpath, chunksize, TCF = 0):
"""
args:
TCF: int
the fraction of Hansen forest cover included in the analysis, TCF = 10 gives 10 percent,
"""
if TCF == 0.:
tcfs = ""
else:
tcfs = "_%dperTC" % np.round(TCF)
# ========== set the filnames ==========
data= OrderedDict()
data["GFED"] = ({
'fname':"./data/BurntArea/GFED/processed/GFED_annual_burendfraction.nc",
'var':"BA", "gridres":"5km", "region":"SIBERIA", "timestep":"Annual",
"start":1997, "end":2016,"rasterio":False, "chunks":{'time':1,'longitude': chunksize, 'latitude': chunksize},
"rename":None
})
data["COPERN_BA"] = ({
'fname':dpath+"/BurntArea/COPERN_BA/processed/COPERN_BA_gls_*_SensorGapFix.nc",
'var':"BA", "gridres":"300m", "region":"Global", "timestep":"AnnualMax",
"start":2014, "end":2019,"rasterio":False, "chunks":{'time':1, 'longitude': chunksize, 'latitude': chunksize},
"rename":{"lon":"longitude", "lat":"latitude"}
})
data["MODIS"] = ({
"fname":dpath+"/BurntArea/MODIS/MODIS_MCD64A1.006_500m_aid0001_reprocessedBAv2.nc",
'var':"BA", "gridres":"500m", "region":"Siberia", "timestep":"Annual",
"start":2001, "end":2018, "rasterio":False, "chunks":{'time':1,'longitude': chunksize, 'latitude': chunksize},
"rename":None, "maskfn":"/media/ubuntu/Seagate Backup Plus Drive/Data51/BurntArea/MODIS/MASK/MCD12Q1.006_500m_aid0001v2.nc"
})
data["esacci"] = ({
"fname":dpath+"/BurntArea/esacci/processed/esacci_FireCCI_*_burntarea.nc",
'var':"BA", "gridres":"250m", "region":"Asia", "timestep":"Annual",
"start":2001, "end":2018, "rasterio":False, "chunks":{'time':1, 'longitude': chunksize, 'latitude': chunksize},
"rename":None, "maskfn":"/media/ubuntu/Seagate Backup Plus Drive/Data51/BurntArea/esacci/processed/esacci_landseamask.nc"
})
data["HANSEN"] = ({
"fname":dpath+"/BurntArea/HANSEN/lossyear/Hansen_GFC-2018-v1.6_*_totalloss_SIBERIAatesacci%s.nc" % tcfs,
'var':"lossyear", "gridres":"250m", "region":"Siberia", "timestep":"Annual",
"start":2001, "end":2018, "rasterio":False, "chunks":{'time':1, 'longitude': chunksize, 'latitude': chunksize},
"rename":None,
})
data["HANSEN_AFmask"] = ({
"fname":dpath+"/BurntArea/HANSEN/lossyear/Hansen_GFC-2018-v1.6_*_totalloss_SIBERIAatesacci%s_MODISAFmasked.nc" % tcfs,
'var':"lossyear", "gridres":"250m", "region":"Siberia", "timestep":"Annual",
"start":2001, "end":2018, "rasterio":False, "chunks":{'time':1, 'longitude': chunksize, 'latitude': chunksize},
"rename":None,
})
return data
def datefixer(year, month, day):
"""
    Builds the date objects and calendar metadata needed for a CF-compliant
    netCDF time coordinate from a single year/month/day.
    args:
        year, month, day: int
            components of the date to encode
    return
        dates: OrderedDict
            dictionary of new datetime objects and calendar/units metadata
"""
# ========== create the new dates ==========
# +++++ set up the list of dates +++++
dates = OrderedDict()
tm = [dt.datetime(int(year) , int(month), int(day))]
dates["time"] = | pd.to_datetime(tm) | pandas.to_datetime |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Callable, Dict, Optional, Union
import pandas as pd
from superset.utils.core import DTTM_ALIAS, extract_dataframe_dtypes, get_metric_name
def sql_like_sum(series: pd.Series) -> pd.Series:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return series.sum(min_count=1)
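# Illustration of the SQL-like behaviour (assumed example): summing an all-NaN
# series returns NaN, mirroring SQL SUM over NULLs, instead of pandas' default 0:
#   pd.Series([None, None], dtype='float64').sum()          -> 0.0
#   sql_like_sum(pd.Series([None, None], dtype='float64'))  -> nan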
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
        df = pd.DataFrame(data)
import pandas as pd
import numpy as np
import sys
import argparse
import time
from scipy.special import gamma
import os
import pickle
import torch
import NMF_functions
from ARD_NMF import ARD_NMF
import pyarrow.feather as feather
from ARD_NMF import run_method_engine
import torch.nn as nn
import torch.multiprocessing as mp
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
def run_parameter_sweep(parameters,dataset,args,Beta):
output = []
objectives = []
nsigs = []
times = []
for idx in range(len(parameters)):
data = ARD_NMF(dataset,args.objective)
W,H,cost,time = run_method_engine(data, args.a, args.phi, args.b, Beta,
args.prior_on_W, args.prior_on_H, args.K0, args.tolerance,args.max_iter)
nsig = write_output(W,H,data.channel_names,data.sample_names,args.output_dir,
args.output_prefix + "_" + parameters['label'][idx])
times.append(time)
nsigs.append(nsig)
objectives.append(cost)
parameters['nsigs'] = nsigs
parameters['objective'] = objectives
parameters['times'] = times
parameters.to_csv(args.output_dir + '/' + args.output_prefix + '_results.txt',sep='\t',index=None)
def write_output(W, H, channel_names, sample_names, output_directory, label, active_thresh = 1e-5):
createFolder(output_directory)
nonzero_idx = (np.sum(H, axis=1) * np.sum(W, axis=0)) > active_thresh
W_active = W[:, nonzero_idx]
H_active = H[nonzero_idx, :]
nsig = np.sum(nonzero_idx)
# Normalize W and transfer weight to H matrix
W_weight = np.sum(W_active, axis=0)
W_final = W_active / W_weight
H_final = W_weight[:, np.newaxis] * H_active
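    # Note: this rescaling leaves the reconstruction unchanged, since
    # (W_active / w) @ (w[:, None] * H_active) == W_active @ H_active.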
sig_names = ['W' + str(j) for j in range(1, nsig + 1)]
W_df = pd.DataFrame(data=W_final, index=channel_names, columns=sig_names)
H_df = pd.DataFrame(data=H_final, index=sig_names, columns=sample_names);
# Write W and H matrices
W_df.to_csv(output_directory + '/'+label+ '_W.txt', sep='\t')
H_df.to_csv(output_directory + '/'+label+ '_H.txt', sep='\t')
return nsig
def main():
''' Run ARD NMF'''
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(
description='NMF with some sparsity penalty described https://arxiv.org/pdf/1111.6085.pdf')
parser.add_argument('--data', help='Data Matrix', required=True)
parser.add_argument('--feather', help='Input in feather format', required=False, default=False, action='store_true')
parser.add_argument('--parquet', help='Input in parquet format', required=False, default=False, action='store_true')
parser.add_argument('--K0', help='Initial K parameter', required=False, default=None, type=int)
parser.add_argument('--max_iter', help='maximum iterations', required=False, default=10000, type=int)
parser.add_argument('--del_', help='Early stop condition based on lambda change', required=False, default=1,
type=int)
parser.add_argument('--tolerance', help='Early stop condition based on max lambda entry', required=False, default=1e-6,
type=float)
    parser.add_argument('--phi', help='dispersion parameter; see the paper for a discussion of choosing phi. '
                                      'default = 1', required=False, default=1.0, type=float)
    parser.add_argument('--a', help='Hyperparameter for lambda. We recommend trying various values of a. Smaller values '
                                    'will result in sparser results. A good starting point might be '
                                    'a = log(F+N)', required=False, default=10.0, type=float)
    parser.add_argument('--b', help='Hyperparameter for lambda. Default used is as recommended in Tan and Fevotte 2012',
required = False,type=float, default = None)
parser.add_argument('--objective',help='Defines the data objective. Choose between "poisson" or "gaussian". Defaults to Poisson',
required=False,default='poisson',type=str)
parser.add_argument('--prior_on_W',help = 'Prior on W matrix "L1" (exponential) or "L2" (half-normal)'
,required = False, default = 'L1',type=str)
parser.add_argument('--prior_on_H',help = 'Prior on H matrix "L1" (exponential) or "L2" (half-normal)'
,required = False, default = 'L1',type=str)
parser.add_argument('--output_dir', help='output_file_name if run in array mode this correspond to the output directory', required=True)
parser.add_argument('--output_prefix', help='Prefix for output files', required=False, default="result", type=str)
parser.add_argument('--labeled', help='Input has row and column labels', required=False,default=False, action='store_true')
parser.add_argument('--report_frequency', help='Number of iterations between progress reports', required=False,
default=100, type=int)
parser.add_argument('--dtype', help='Floating point accuracy', required=False,
default='Float32', type=str)
    parser.add_argument('--parameters_file', help='allows running many different configurations of the NMF method on a multi-'
'GPU system. To run in this mode provide this argument with a text file with '
'the following headers:(a,phi,b,prior_on_W,prior_on_H,Beta,label) label '
'indicates the output stem of the results from each run.', required = False
,default = None)
args = parser.parse_args()
print('Reading data frame from '+ args.data)
if args.dtype == 'Float32':
args.dtype = torch.float32
elif args.dtype == 'Float16':
args.dtype = torch.float16
if args.parquet:
dataset = pd.read_parquet(args.data)
elif args.feather:
print('loading feather...')
dataset = feather.read_dataframe(args.data)
else:
if args.labeled:
dataset = | pd.read_csv(args.data, sep='\t', header=0, index_col=0) | pandas.read_csv |
# coding=utf-8
from __future__ import absolute_import, print_function
import os
import pandas as pd
from suanpan.app.arguments import Csv
from suanpan.app import app
from suanpan.storage import storage
from suanpan.utils import image
from suanpan import path
from text.opencv_dnn_detect import angle_detect
from utils.function import detect_angle
from arguments import Images
@app.input(Images(key="inputImage"))
@app.output(Images(key="outputImage"))
@app.output(Images(key="outputImageRaw"))
@app.output(Csv(key="outputData"))
def SPAngleModel(context):
args = context.args
images = args.inputImage
outputData = {"image": [], "angle": []}
for i, img in enumerate(images):
img, angle = detect_angle(img[:, :, ::-1], angle_detect)
image.save(
os.path.join(
args.outputImage,
storage.delimiter.join(images.images[i].split(storage.delimiter)[8:]),
),
img[:, :, ::-1],
)
outputData["image"].append(
storage.delimiter.join(images.images[i].split(storage.delimiter)[8:])
)
outputData["angle"].append(angle)
outputData = | pd.DataFrame(outputData) | pandas.DataFrame |
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
# import emoji
import gc
from utils.definitions import ROOT_DIR
from collections import OrderedDict
from utils.datareader import Datareader
def check_conditions( df, mean, std, error=(1.5,1.5)):
"""
    Checks whether the given dataframe is close enough to the target playlist-length
    distribution: the mean and std of its 'num_tracks' column must each lie within
    +/- error of the target values.
    :param df: dataframe to check
    :param mean: target mean of 'num_tracks'
    :param std: target std of 'num_tracks'
    :param error: tuple of allowed deviations (mean tolerance, std tolerance)
    :return: True if both statistics are within tolerance, False otherwise
"""
target_mean = np.mean(df['num_tracks'])
target_std = np.std(df['num_tracks'])
if mean > (target_mean + error[0]) or mean < (target_mean - error[0]):
print("error m ",mean,target_mean)
return False
if std > (target_std + error[1]) or std < (target_std - error[1]):
print("error s ",std,target_std)
return False
return True
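# Toy example of the tolerance check (numbers invented, not taken from the challenge data):
#   toy = pd.DataFrame({'num_tracks': [20, 25, 30, 35, 40]})   # mean 30, population std ~7.07
#   check_conditions(toy, mean=30.5, std=7.5)                  # True: both within the +/- 1.5 default
#   check_conditions(toy, mean=35.0, std=7.5)                  # False: the mean is off by 5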
def get_random_df_constrained( source_df, num_of_pl, min_v, max_v, mean, std, errors=(1.5, 1.5)):
"""
iterates until it creates a dataframe that satisfies the conditions.
"""
seed = 0
while True:
df = source_df[((source_df['num_tracks']) >= min_v) & ((source_df['num_tracks']) <= max_v)].sample(
n=num_of_pl, random_state=seed)
if check_conditions(df, mean=mean, std=std, error=errors):
break
seed+=1
return df,seed
def generate_train(playlists):
## mean
cates = {'cat1': (10, 50, 1000, 28.6, 11.2), 'cat2_1': (10, 40, 998, 23.8, 8.7),
'cat2_2': (70, 80, 2, 75, 4), 'cat3_1': (10, 50, 314, 29.4, 11.4),
'cat3_2': (51, 75, 425, 62, 7.2), 'cat3_3': (75, 100, 261, 87, 7.1),
'cat4': (40, 100, 1000, 63, 16.5), 'cat5': (40, 100, 1000, 63.5, 17.2),
'cat6': (40, 100, 1000, 63.6, 16.7), 'cat7': (101, 250, 1000, 150, 38.6),
'cat8': (101, 250, 1000, 151.7, 38.6), 'cat9': (150, 250, 1000, 189, 28),
'cat_10': (150, 250, 1000, 187.5, 27)}
cates = OrderedDict(sorted(cates.items(), key=lambda t: t[0]))
cat_pids = {}
seeds = [0] * len(cates)
count = 0
for cat, info in cates.items():
print(cat)
df, seeds[count] = get_random_df_constrained(playlists, min_v=info[0], max_v=info[1],
num_of_pl=info[2],
mean=info[3], std=info[4], errors=(1.5, 1.5))
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
count += 1
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
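# For reference, each tuple in the 'cates' dictionary above is consumed by
# get_random_df_constrained as (min_tracks, max_tracks, num_playlists, target_mean, target_std);
# e.g. 'cat1': (10, 50, 1000, 28.6, 11.2) samples 1000 playlists with 10-50 tracks whose
# 'num_tracks' mean/std must land near 28.6/11.2.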
def generate_test(cat_pids, playlists, interactions):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
elif cat == 'cat2_1' or cat == 'cat2_2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
elif cat == 'cat3_1' or cat == 'cat3_2' or cat == 'cat3_3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
elif cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
elif cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat5 done")
elif cat == 'cat6':
num_samples = 10
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat6 done")
elif cat == 'cat7':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat7 done")
elif cat == 'cat8':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat8 done")
elif cat == 'cat9':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat9 done")
elif cat == 'cat_10':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat10 done")
else:
            raise Exception("cat not present")
exit()
tracks = pd.read_csv(ROOT_DIR+"/data/original/tracks.csv", delimiter='\t')
tids = set(df_eval_itr['tid'])
df = tracks[tracks['tid'].isin(tids)]
df = df[['tid', 'arid']]
df_eval_itr = pd.merge(df_eval_itr, df, on='tid')
del (tracks)
del (df)
df_test_pl.reset_index(inplace=True, drop=True)
df_test_itr.reset_index(inplace=True, drop=True)
df_eval_itr.reset_index(inplace=True, drop=True)
interactions.reset_index(inplace=True, drop=True)
return df_test_pl, df_test_itr, df_eval_itr, interactions
def main_10k_without_seeds():
df_playlists = | pd.read_csv(ROOT_DIR+"/data/original/train_playlists.csv", delimiter='\t') | pandas.read_csv |
import pandas as pd
import numpy as np
from pandas import Int8Dtype
@pd.api.extensions.register_extension_dtype
class Bool(Int8Dtype):
name = "Bool"
# TODO: overload dtype Int8 name...
x = pd.Series([True, False, False, np.nan] * 100000, dtype="Bool")
print(x.memory_usage(deep=True), x.dtype)
z = pd.Series([True, False, False, np.nan] * 100000)
print(z.memory_usage(deep=True), z.dtype)
u = pd.Series([True, False, False, np.nan] * 100000, dtype="float")
print(u.memory_usage(deep=True), u.dtype)
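# Rough expectation for the printout (approximate and platform-dependent): the Int8-backed
# "Bool" series stores its data as int8 plus a validity mask, so it stays close in size to the
# plain "bool" series below and far smaller than the float64 series (8 bytes per value); the
# series built without a dtype falls back to object dtype because of np.nan and is by far the
# largest, since every element is a boxed Python object.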
y = | pd.Series([True, False, False, False] * 100000, dtype="bool") | pandas.Series |
import os
import pandas as pd
import numpy as np
import logging
import wget
import time
import pickle
from src.features import preset
from src.features import featurizer
from src.data.utils import LOG
from matminer.data_retrieval.retrieve_MP import MPDataRetrieval
from tqdm import tqdm
from pathlib import Path
from src.data.get_data_MP import data_MP
import dotenv
def featurize_by_material_id(material_ids: np.array,
featurizerObject: featurizer.extendedMODFeaturizer,
MAPI_KEY: str,
writeToFile: bool = True) -> pd.DataFrame:
""" Run all of the preset featurizers on the input dataframe.
Arguments:
df: the input dataframe with a `"structure"` column
containing `pymatgen.Structure` objects.
Returns:
The featurized DataFrame.
"""
def apply_featurizers(criterion, properties, mpdr, featurizerObject):
LOG.info("Downloading dos and bandstructure objects..")
timeDownloadStart = time.time()
df_portion = mpdr.get_dataframe(criteria=criterion, properties=properties)
timeDownloadEnd = time.time()
LOG.info(df_portion)
df_time, df_portion = featurizerObject.featurize(df_portion)
df_time["download_objects"] = [timeDownloadEnd-timeDownloadStart]
return df_time, df_portion
properties = ["material_id","full_formula", "bandstructure", "dos", "structure"]
mpdr = MPDataRetrieval(MAPI_KEY)
steps = 1
leftover = len(material_ids)%steps
df = | pd.DataFrame({}) | pandas.DataFrame |
import time
import numpy as np
import pandas as pd
def add_new_category(x):
"""
Aimed at 'trafficSource.keyword' to tidy things up a little
"""
x = str(x).lower()
if x == 'nan':
return 'nan'
x = ''.join(x.split())
if r'provided' in x:
return 'not_provided'
if r'youtube' in x or r'you' in x or r'yo' in x or r'tub' in x or r'yout' in x or r'y o u' in x:
return 'youtube'
if r'google' in x or r'goo' in x or r'gle' in x:
return 'google'
else:
return 'other'
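# Illustrative behaviour (inputs invented): add_new_category('Google Search') -> 'google',
# add_new_category('you tube videos') -> 'youtube', add_new_category(np.nan) -> 'nan',
# add_new_category('(not provided)') -> 'not_provided', anything unmatched -> 'other'.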
# Load the cleaned data that was previously dumped to parquet.
train_df = pd.read_parquet('input/cleaned/train.parquet.gzip')
test_df = pd.read_parquet('input/cleaned/test.parquet.gzip')
# Remove target col.
y_train = train_df['totals.transactionRevenue'].values
train_df = train_df.drop(['totals.transactionRevenue'], axis=1)
# Join datasets for row-wise feature engineering.
trn_len = train_df.shape[0]
merged_df = pd.concat([train_df, test_df])
num_cols = ["totals.hits", "totals.pageviews", "visitNumber", "visitStartTime"]
for col in num_cols:
merged_df[col] = merged_df[col].astype(float)
merged_df['diff_visitId_time'] = merged_df['visitId'] - merged_df['visitStartTime']
merged_df['diff_visitId_time'] = (merged_df['diff_visitId_time'] != 0).astype(float)
merged_df['totals.hits'] = merged_df['totals.hits'].astype(float)
# Build time-based features.
merged_df['formated_date'] = pd.to_datetime(merged_df['date'], format='%Y%m%d')
merged_df['month'] = pd.DatetimeIndex(merged_df['formated_date']).month
merged_df['year'] = pd.DatetimeIndex(merged_df['formated_date']).year
merged_df['day'] = pd.DatetimeIndex(merged_df['formated_date']).day
merged_df['quarter'] = pd.DatetimeIndex(merged_df['formated_date']).quarter
merged_df['weekday'] = pd.DatetimeIndex(merged_df['formated_date']).weekday
merged_df['weekofyear'] = | pd.DatetimeIndex(merged_df['formated_date']) | pandas.DatetimeIndex |
from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
import psycopg2
import math
import pandas as pd
from openpyxl import Workbook
import csv
import random
def psql_pdc(query):
    # production PostgreSQL credentials
connP_P = {
'host' : '10.150.1.74',
'port' : '5432',
'user':'postgres',
'password':'<PASSWORD>',
'database' : 'postgres'}
    # connect to production PostgreSQL
    conexionP_P = psycopg2.connect(**connP_P)
    #print('\nConnection to the production PostgreSQL server established!')
    cursorP_P = conexionP_P.cursor ()
    # run the phone-numbers query on PostgreSQL
cursorP_P.execute(query)
anwr = cursorP_P.fetchall()
cursorP_P.close()
conexionP_P.close()
return anwr
def to_horiz(anwr_P,name,_id):
#vertical horizontal
anwr_P1 = anwr_P.pivot(index=0,columns=1)
anwr_P1[_id] = anwr_P1.index
col1 = []
i=0
for i in range(anwr_P1.shape[1]-1):
col1.append(name+str(i+1))
col1.append(_id)
anwr_P1.columns = col1
return anwr_P1
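# Shape sketch for to_horiz (rows invented; the real columns come from the SQL query, where
# column 0 is the debtor id, column 1 a slot number and column 2 the value being pivoted):
#   long input (0, 1, 2)          to_horiz(df, 'phone', 'deudor_id')
#   111  1  3001112233            phone1      phone2      deudor_id
#   111  2  3004445566            3001112233  3004445566  111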
def csv_o(fn,name):
response = HttpResponse(content_type = "text/csv")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
# for j in range(fn.shape[1]):
# try:
# fn.iloc[:,j] = fn.iloc[:,j].str.decode(encoding='utf-8-sig')
# fn.iloc[:,j] = fn.iloc[:,j].str.encode(encoding='utf_16_le')
# except:
# pass
fn2 = [tuple(x) for x in fn.values]
writer = csv.writer(response,delimiter ='|')
writer.writerow(fn.columns)
writer.writerows(fn2)
return response
def excel(fn,name):
wb = Workbook()
ws = wb.active
k = 0
a = pd.DataFrame(fn.columns)
for k in range(a.shape[0]):
ws.cell(row = 1, column = k+1).value = a.iloc[k,0]
i=0
j=0
for i in range(fn.shape[0]):
for j in range(0,fn.shape[1]):
try:
ws.cell(row = i+2, column = j+1).value = fn.iloc[i,j]
except:
pass
response = HttpResponse(content_type = "application/ms-excel")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
wb.save(response)
return response
def excel_CV_COL(request):
today = datetime.now()
tablename = "CV_Col"+today.strftime("%Y%m%d%H") + ".xlsx"
with open("./hello/Plantillas/Colp/QueryTel_COL.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Colp/QueryCor_COL.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Colp/QueryDir_COL.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Colp/QueryCV_COL.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Colp/QueryCiu_COL.txt","r") as f6:
queryP_Ciu = f6.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
df = df.rename(columns={0:'rownumber',
1:'obligacion_id',
2:'deudor_id',
3:'unico',
4:'estado',
5:'tipo_cliente',
6:'nombre',
7:'producto',
8:'initial_bucket',
9:'ciudad',
10:'sucursal',
11:'tipo_prod',
12:'dias_mora_inicial',
13:'dias_mora_actual',
14:'rango_mora_inicial',
15:'rango_mora_final',
16:'rango',
17:'suma_pareto',
18:'rango_pareto',
19:'fcast',
20:'fdesem',
21:'vrdesem',
22:'saldo_total_inicial',
23:'saldo_total_actual',
24:'saldo_capital_inicial',
25:'saldo_capital_actual',
26:'saldo_vencido_inicial',
27:'saldo_vencido_actual',
28:'pagomin',
29:'fultpago',
30:'vrultpago',
31:'agencia',
32:'tasainter',
33:'feultref',
34:'ultcond',
35:'fasigna',
36:'eqasicampana',
37:'diferencia_pago',
38:'pago_preliminar',
39:'pago_cliente',
40:'min',
41:'tarifa',
42:'honorarios',
43:'perfil_mes_4',
44:'perfil_mes_3',
45:'perfil_mes_2',
46:'perfil_mes_1',
47:'fecha_primer_gestion',
48:'fecha_ultima_gestion',
49:'perfil_mes_actual',
50:'contactabilidad',
51:'ultimo_alo',
52:'descod1',
53:'descod2',
54:'asesor',
55:'fecha_gestion',
56:'telefono_mejor_gestion',
57:'mejorgestionhoy',
58:'asesor_indicador_hoy',
59:'repeticion',
60:'llamadas',
61:'sms',
62:'correos',
63:'gescall',
64:'visitas',
65:'whatsapp',
66:'no_contacto',
67:'total_gestiones',
68:'telefono_positivo',
69:'marcaciones_telefono_positivo',
70:'ultima_marcacion_telefono_positivo',
71:'fec_creacion_ult_compromiso',
72:'fec_pactada_ult_compromiso',
73:'valor_acordado_ult_compromiso',
74:'asesor_ult_compromiso',
75:'cantidad_acuerdos_mes',
76:'estado_acuerdo',})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return excel(fn,tablename)
def csv_CV_Claro(request):
today = datetime.now()
tablename = "CV_Claro" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Claro/QueryTel_Claro.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Claro/QueryCor_Claro.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Claro/QueryCV_Claro.txt","r") as f4:
queryP_cons = f4.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
yanwr = psql_pdc(queryP_cons)
#dataframes
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
df = pd.DataFrame(yanwr)
anwr_P1 = to_horiz(anwr_P,'phone','deudor_id')
    # rename email fields
anwr_C = anwr_C.rename(columns={
0:'deudor_id',
1:'mail0',
2:'mail1'})
anwr_C1 = anwr_C.drop_duplicates(subset=['deudor_id'])
    # rename CV report fields
df = df.rename(columns={0:'rownumber',
1:'deudor_id',
2:'obligacion_id',
3:'nombredelcliente',
4:'estado',
5:'tipo_cliente',
6:'unico',
7:'crmorigen',
8:'potencialmark',
9:'prepotencialmark',
10:'writeoffmark',
11:'dias_mora',
12:'segmento_bpo',
13:'rango_bpo',
14:'tipo',
15:'fecha_de_vencimiento',
16:'min_cliente',
17:'valorscoring',
18:'numeroreferenciadepago',
19:'monto_inicial',
20:'monto_ini_cuenta',
21:'porcentaje_descuento',
22:'valor_descuento',
23:'valor_a_pagar',
24:'deuda_real',
25:'valor_pago',
26:'saldo_pendiente',
27:'fecha_pago',
28:'fecha_compromiso',
29:'fecha_pago_compromiso',
30:'valor_compromiso',
31:'estado_acuerdo',
32:'ind_m4',
33:'ind_m3',
34:'ind_m2',
35:'ind_m1',
36:'fecha_primer_gestion',
37:'fecha_ultima_gestion',
38:'indicador',
39:'phone',
40:'asesor',
41:'fecha_gestion',
42:'contactabilidad',
43:'indicador_hoy',
44:'repeticion',
45:'llamadas',
46:'sms',
47:'correos',
48:'gescall',
49:'whatsapp',
50:'visitas',
51:'no_contacto',
52:'total_gestiones',
53:'telefono_positivo',
54:'fec_ultima_marcacion'})
#a = fn[fn.obligacion_id == '9876510000211227']
#i=0
#lin = ['no_contacto_mes_actual','gescall_mes_actual','tel_mes_actual','tel_positivo']
#for i in lin:
# df[i].fillna(0,inplace=True)
# df[i] = df[i].apply(lambda x: round(x))
# df[i] = df[i].astype('str')
fn = pd.merge(df,anwr_P1,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,anwr_C1,on = ["deudor_id"]\
,how = "left",indicator = False)
return csv_o(fn,tablename)
def csv_CV_CarP(request):
today = datetime.now()
tablename = "CV_CarP" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/CarP/QueryTel_CarP.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/CarP/QueryCor_CarP.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/CarP/QueryDir_CarP.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/CarP/QueryCV_CarP.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/CarP/QueryCiu_CarP.txt","r") as f5:
queryP_Ciu = f5.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
    # rename CV report fields
df = df.rename(columns={0:'deudor_id',
1:'unico',
2:'nombre',
3:'obligacion',
4:'obligacion_17',
5:'tipo_cliente',
6:'sucursal_final',
7:'zona',
8:'ano_castigo',
9:'saldo_k_pareto_mes_vigente',
10:'intereses',
11:'honorarios_20',
12:'saldo_total_mes_vigente',
13:'saldo_total_pareto_mes_vigente_',
14:'saldokpareto',
15:'rango_k_pareto',
16:'interesespareto',
17:'honorariospareto',
18:'porcentaje_k_del_total',
19:'porcentaje_intereses_del_total',
20:'porcentaje_honorarios_del_total',
21:'rango_k_porcentaje',
22:'capital_20_porciento',
23:'dias_mora_acumulado',
24:'marca_juridica_cliente',
25:'focos',
26:'valor_pago',
27:'ultima_fecha_pago',
28:'estado_cliente_mes_anterior',
29:'valor_compromiso',
30:'fecha_compromiso',
31:'fecha_pactada_compromiso',
32:'asesor_compromiso',
33:'ind_m4',
34:'ind_m3',
35:'ind_m2',
36:'ind_m1',
37:'fecha_primer_gestion',
38:'fecha_ultima_gestion',
39:'indicador',
40:'telefono_mejor_gestion',
41:'asesor_mejor_gestion',
42:'fecha_gestion',
43:'contactabilidad',
44:'indicador_hoy',
45:'repeticion',
46:'llamadas',
47:'sms',
48:'correos',
49:'gescall',
50:'whatsapp',
51:'visitas',
52:'no_contacto',
53:'total_gestiones',
54:'telefono_positivo',
55:'fec_ultima_marcacion',
56:'investigacion_de_bienes'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return csv_o(fn,tablename)
def csv_CV_FalaJ(request):
today = datetime.now()
tablename = "CV_FalJ"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Fala/QueryTel_Fal.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Fala/QueryCor_Fal.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Fala/QueryDir_Fal.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Fala/QueryCV_FalJ.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Fala/QueryPa_Fal.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Fala/QueryRe_Fal.txt","r") as f6:
queryP_PR = f6.read()
with open("./hello/Plantillas/Fala/QueryCiu_Fal.txt","r") as f7:
queryP_Ciu = f7.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrR = psql_pdc(queryP_PR)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Pa = pd.DataFrame(anwrPa)
anwr_R = pd.DataFrame(anwrR)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infR = to_horiz(anwr_R,'referencia',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
try:
infP = to_horiz(anwr_Pa,'pago','obligacion_id')
if infP.shape[1] > 4:
tipos = infP.dtypes.to_frame()
tipos['index'] = range(len(tipos))
tipos = tipos.set_index('index')
su = []
for n in range(len(tipos)):
if str(tipos.iloc[n,0]) == 'float64':
su.append(n)
else:
pass
infP1 = infP[['pago1','pago2']]
infP1['pago3'] = infP.iloc[:,2:max(su)+1].sum(axis = 1)
infP1['obligacion_id'] = infP.index
infP = infP1
i=0
lin = ['pago1','pago2','pago3']
for i in lin:
infP[i].fillna(0,inplace=True)
infP[i] = infP[i].apply(lambda x: round(x))
infP[i] = '$' + infP[i].astype('str')
except:
pass
    # rename CV report fields
df = df.rename(columns={0:'idcbpo',
1:'tipo_producto_asignacion',
2:'grupo',
3:'cartera',
4:'tipo_cliente',
5:'unico',
6:'unico_pro',
7:'obligacion_id',
8:'deudor_id',
9:'nombre',
10:'producto',
11:'saldototal',
12:'saldo_pareto',
13:'segmentacion',
14:'peso',
15:'alturamora_hoy',
16:'rango',
17:'dias_mora',
18:'vencto',
19:'indicador_mejor_gestion',
20:'total_gestiones',
21:'fecha_ultima_gestion',
22:'asesor_mejor_gestion',
23:'fecha_compromiso',
24:'fecha_pago_compromiso',
25:'valor_compromiso',
26:'asesor',
27:'estado_acuerdo',
28:'dias_mora_pagos',
29:'valor_pago',
30:'fecha_pago',
31:'pendiente',
32:'pago_total',
33:'nvo_status',
34:'status_refresque',
35:'nvo_status_refresque',
36:'dias_mora_refresque',
37:'pendiente_mas_gastos',
38:'vencida_mas_gastos',
39:'gastos_mora',
40:'gastos_cv',
41:'porcentaje_gasto',
42:'valor_a_mantener_sin_gxc',
43:'cv8',
44:'cv9',
45:'cv10',
46:'cv11',
47:'cv12',
48:'restructuracion',
49:'valor_restruc',
50:'pagominimo_actual',
51:'pagominimo_anterior',
52:'periodo_actual',
53:'periodo_anterior',
54:'cuota36',
55:'cuota48',
56:'cuota60',
57:'cuota72',
58:'proyectada_cargue',
59:'aplica_ajuste',
60:'fecha',
61:'diferencia',
62:'porcentaje_saldo_total',
63:'x',
64:'valor',
65:'porcentaje_participacion',
66:'ind_m4',
67:'ind_m3',
68:'ind_m2',
69:'ind_m1',
70:'fecha_primer_gestion',
71:'telefono_mejor_gestion',
72:'fecha_gestion',
73:'contactabilidad',
74:'indicador_hoy',
75:'repeticion',
76:'llamadas',
77:'sms',
78:'correos',
79:'gescall',
80:'whatsapp',
81:'visitas',
82:'no_contacto',
83:'telefono_positivo',
84:'fec_ultima_marcacion',
85:'lista_robinson'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infR,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
if 'infP' in locals():
        # merge payments
fn = pd.merge(fn,infP,on = ["obligacion_id"]\
,how = "left",indicator = False)
        # reorder columns
lt = fn.columns.tolist()
lt = lt[:29] + lt[(infP.shape[1]-1)*-1:] + lt[29:fn.shape[1]-(infP.shape[1]-1)]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_FalaC(request):
today = datetime.now()
tablename = "CV_FalC"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Fala/QueryTel_Fal.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Fala/QueryCor_Fal.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Fala/QueryDir_Fal.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Fala/QueryCV_FalC.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Fala/QueryPa_Fal.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Fala/QueryRe_Fal.txt","r") as f6:
queryP_PR = f6.read()
with open("./hello/Plantillas/Fala/QueryCiu_Fal.txt","r") as f7:
queryP_Ciu = f7.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrR = psql_pdc(queryP_PR)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = | pd.DataFrame(anwr) | pandas.DataFrame |
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from typing import List, Union
import pandas as pd
from feature_engine.encoding.base_encoder import BaseCategoricalTransformer
from feature_engine.variable_manipulation import _check_input_parameter_variables
class MeanEncoder(BaseCategoricalTransformer):
"""
The MeanEncoder() replaces categories by the mean value of the target for each
category.
For example in the variable colour, if the mean of the target for blue, red
and grey is 0.5, 0.8 and 0.1 respectively, blue is replaced by 0.5, red by 0.8
and grey by 0.1.
The encoder will encode only categorical variables by default (type 'object' or
'categorical'). You can pass a list of variables to encode. Alternatively, the
encoder will find and encode all categorical variables (type 'object' or
'categorical').
With `ignore_format=True` you have the option to encode numerical variables as well.
The procedure is identical, you can either enter the list of variables to encode, or
the transformer will automatically select all variables.
The encoder first maps the categories to the numbers for each variable (fit). The
encoder then replaces the categories with those numbers (transform).
Parameters
----------
variables: list, default=None
The list of categorical variables that will be encoded. If None, the
encoder will find and transform all variables of type object or categorical by
default. You can also make the transformer accept numerical variables, see the
next parameter.
ignore_format: bool, default=False
Whether the format in which the categorical variables are cast should be
ignored. If false, the encoder will automatically select variables of type
object or categorical, or check that the variables entered by the user are of
type object or categorical. If True, the encoder will select all variables or
accept all variables entered by the user, including those cast as numeric.
Attributes
----------
encoder_dict_:
Dictionary with the target mean value per category per variable.
variables_:
The group of variables that will be transformed.
n_features_in_:
The number of features in the train set used in fit.
Methods
-------
fit:
Learn the target mean value per category, per variable.
transform:
Encode the categories to numbers.
fit_transform:
Fit to the data, then transform it.
inverse_transform:
Encode the numbers into the original categories.
Notes
-----
NAN are introduced when encoding categories that were not present in the training
dataset. If this happens, try grouping infrequent categories using the
RareLabelEncoder().
See Also
--------
feature_engine.encoding.RareLabelEncoder
References
----------
.. [1] <NAME>. "A Preprocessing Scheme for High-Cardinality Categorical
Attributes in Classification and Prediction Problems". ACM SIGKDD Explorations
Newsletter, 2001. https://dl.acm.org/citation.cfm?id=507538
"""
def __init__(
self,
variables: Union[None, int, str, List[Union[str, int]]] = None,
ignore_format: bool = False,
) -> None:
if not isinstance(ignore_format, bool):
raise ValueError("ignore_format takes only booleans True and False")
self.variables = _check_input_parameter_variables(variables)
self.ignore_format = ignore_format
def fit(self, X: pd.DataFrame, y: pd.Series):
"""
Learn the mean value of the target for each category of the variable.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The training input samples. Can be the entire dataframe, not just the
variables to be encoded.
y: pandas series
The target.
Raises
------
TypeError
- If the input is not a Pandas DataFrame.
            - If user enters non-categorical variables (unless ignore_format is True)
ValueError
- If there are no categorical variables in the df or the df is empty
- If the variable(s) contain null values
Returns
-------
self
"""
X = self._check_fit_input_and_variables(X)
if not isinstance(y, pd.Series):
y = | pd.Series(y) | pandas.Series |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Importing the required modules
import pandas as pd
import numpy as np
import time
import sys
import warnings
from collections import defaultdict
from operator import itemgetter
# To make sure warnings are filtered out
warnings.filterwarnings("ignore")
col_name = ['user_id', 'item_id', 'ratings', 'timestamp']
# Reading from input csv files and storing in data frames
df = pd.read_csv("./ratings.csv")
movies = pd.read_csv("./movies.csv")
df = pd.merge(df, movies, on='movieId')
avg_rating_df = pd.DataFrame(df.groupby('title')['rating'].mean())
avg_rating_df['no_of_ratings'] = df.groupby('title')['rating'].count()
avg_rating_df['title'] = avg_rating_df.index
um_rating = df.pivot_table(index='userId', columns='title',
values='rating')
# Recommend function: outputs movies correlated with
# the movies already present in the database
def recommend(userID, genre=None):
rec_mov = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 17:14:55 2021
@author: sergiomarconi
"""
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingCVClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import normalize
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
import category_encoders as ce
species_to_genus = {
'2PLANT':"NA",
'ABBA':"AB",
'ABLAL':"AB",
"ABLO":'AB',
"ABMA":"AB",
"ABAM":"AB",
'ACNE2': "AC",
'ACNEN': "AC",
'ACPE':"AC",
'ACRU': "AC",
'ACSA3' : "AC",
'ACSA2' :"AC",
'ACFA':"AC2",
'ACKO': "AC2",
'ACGR':"AC2",
'AIAL' : "AI",
'ALRU2': "AL",
'ALVI5':'AL',
'AMLA' : "AM",
'AMEL':'AM',
'ARVIM':"AR",
'BEAL2': "BE",
'BEGL/BENA':"BE",
'BEPA': "BE",
'BELE': "BE",
'BEPO': "BE",
'BETUL':'BE',
'BENE4' : "BE",
'BUBU':"BU",
'BUSI':"BU",
'BOSU2':"BO",
'CACA18':"CA1",
'CADE27':"CA2",
'CAGL8':"CA3",
'CAOV2':"CA3",
'CAOV3':"CA3",
'CAAQ2':'CA',
'CACO15': "CA3",
'CATO6':"CA3",
'CAIL2':"CA3",
'CECA4':"CE1",
'CELA':"CE2",
'CEOC':"CE2",
'CODR':"CO",
'CODI8':"CO",
'COFL2':"CO2",
'DIVI5':"DI",
'ELAN':"EL",
'FAGR':"FA",
'FRAM2':"FR",
'FRAXI':'FR',
'FRNI':'FR',
'LARIX':'LA',
'ILAN':'IL',
'FRPE':"FR",
'GYDI':"GY",
'GUOF':"GU",
'GUSA':"GU",
'GLTR':"GL",
'HALES':"HA",
'JUNI':"JU1",
'JUNIP':"JU2",
'JUVI':"JU2",
'JUOS':"JU2",
'LIST2':"LI1",
'LITU':"LI2",
'MAPO':"MA",
'MAFR':'MA',
'MAGNO':'MA',
'MORU2':"MO",
'NYBI':"NY",
'NYSY':"NY",
'NYAQ2':'NY',
'OXYDE':"OX",
'OXAR':"OX",
'OSVI':'OS',
'PICEA':"PI1",
'PIAL3':"PI2",
'PIAC':"PI3",
'PICO':"PI2",
'PIEL':"PI2",
'PIEN':"PI2",
'PIEC2':"PI2",
'PIFL2':"PI2",
'PIGL':"PI2",
'PIMA':"PI2",
'PINUS':'PI2',
'PIPA2':"PI2",
'PIPO':"PI2",
'PIRU':"PI2",
'PIPOS':"PI2",
'PIPU5':"PI2",
'PIST':"PI2",
'PITA':"PI2",
'PIGL2':"PI2",
'PIED':"PI",
'PIJE':"PI",
'PIRI':'PI',
'PIVI2':'PI',
'PINUS':"PI2",
'PLOC':"PL",
'POTR5':"PO",
'POGR4':"PO",
'PODE3':"PO",
'PRVE':"PR",
'PRVI':"PR",
'PRAV':'PR',
'PRSE2': "PR",
'PRAN3':"PR",
'PSME':"PS",
'QUAL':"QU",
'QUCO2':"QU",
'QUCH':"QU",
'QUCH2':"QU",
'QUHE2':'QU',
'QUERC':"QU",
'QUGE2':"QU",
'QUSH':"QU",
'QULA2':'QU',
"QUPH":"QU",
'QULA3':"QU",
'QUERCUS':"QU",
'QULY':"QU",
'QUMA3':"QU",
'QUMA13':"QU",
'THUJA':"TU",
'PISA2':"PI2",
'TABR2':"TA",
'QUDO':"QU",
'MEPO5':'ME',
'QUMI':"QU",
'QUFA':"QU",
'QUMO4':"QU",
'QUMU':"QU",
'QUNI':"QU",
'QUKE':"QU",
'QUVE':'QU',
'QUWI2':"QU",
'QUPA5':"QU",
'QURU':"QU",
'QUST':"QU",
'RHGL':"RH",
"ROPS":"RO",
'SASSA':'SA',
'SALIX':'SA',
'SYOC':"SY",
'SILA20':"SI",
'SWMA2':"SW",
'TRSE6':"TR",
'TSCA':"TS",
'TSHE':"TS",
'TIAM':"TI",
'TAHE':"TA",
'ULAL':"UL",
'ULAM':"UL",
'ULMUS':"UL",
'ULCR':"UL",
'ULRU':"UL",
}
import os
os.chdir("/blue/ewhite/s.marconi/NeonSpeciesClassification/")
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
from collections import Counter
from src.hdr import *
from sklearn.preprocessing import normalize
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
domainid = {
"GUAN": "D04",
"BART": "D01",
"HARV": "D01",
"STEI": "D05",
"TREE": "D05",
"UNDE": "D05",
"SERC": "D02",
"SCBI": "D02",
"OSBS": "D03",
"MLBS": "D07",
"DELA": "D08",
"TALL": "D08",
"BLAN": "D02",
"UKFS": "D06",
"RMNP": "D10",
"BONA": "D19",
"MOAB": "D13",
"DEJU": "D19",
"LENO": "D08",
"DSNY": "D03",
"JERC": "D03",
"KONZ": "D06",
"CLBJ": "D11",
"YELL": "D12",
"NIWO": "D13",
"ABBY": "D16",
"WREF": "D16",
"SJER": "D17",
"PUUM": "D20"
}
def categorical_encoder(cats,y):
import category_encoders as ce
le = LabelEncoder()
le.fit(y)
le = le.transform(y)
enc = ce.LeaveOneOutEncoder(cols=['siteID'])
# enc = enc.fit(cats).transform(cats)
train_enc = enc.fit_transform(cats,le)
return(train_enc)
# prepare input data
def prepare_inputs(X_train, X_test, cats = ['domainID', 'siteID']):
X_train_enc, X_test_enc = list(), list()
# label encode each column
for i in cats:
le = LabelEncoder()
le.fit(X_train[i])
# encode
train_enc = le.transform(X_train[i])
test_enc = le.transform(X_test[i])
# store
X_train_enc.append(train_enc)
X_test_enc.append(test_enc)
return X_train_enc, X_test_enc
min_class = Counter(y_train.taxonID)
unsuited = pd.DataFrame(min_class.items())
only_one = unsuited.iloc[:,1]<2
unsuited = unsuited[only_one][0]
#unsuited = pd.DataFrame(min_class.items())
doble_unsuited = X_train[y_train.taxonID.isin(unsuited)]
X_train=X_train.append(doble_unsuited)
doble_unsuited = y_train[y_train.taxonID.isin(unsuited)]
y_train=y_train.append(doble_unsuited)
min_class = Counter(y_train.taxonID)
min_class = min_class[min(min_class, key=min_class.get)]
min_class
# dimensionality reduction
def kld_reduction(brick, kld_out):
from sklearn import preprocessing
refl = brick.drop(['individualID'], axis=1)
scaler = preprocessing.StandardScaler().fit(refl)
refl = scaler.transform(refl)
kld_groups = getClusters(refl, numBands = 15)
np.savetxt(kld_out, kld_groups, delimiter=",")
individualID=brick["individualID"]
#
brick = brick.drop(columns=['individualID'])
brick = brick.values
all_data = np.zeros([brick.shape[0],1])
for jj in np.unique(kld_groups):
which_bands = kld_groups == jj
#min
new_col = np.apply_along_axis(min, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#mean
new_col = np.apply_along_axis(np.mean, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#max
new_col = np.apply_along_axis(max, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#reappend individualID on the all data dataframe
all_data = pd.DataFrame(all_data)
all_data["individualID"] = individualID
all_data = all_data.drop([0], axis=1)
#
#shift back individualID to first row
cols = list(all_data.columns)
cols = [cols[-1]] + cols[:-1]
all_data = all_data[cols]
return all_data
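# Net effect of kld_reduction (shapes are illustrative): the reflectance bands are grouped into
# (up to) 15 KLD band clusters and each cluster is summarised by its min, mean and max, so each
# crown ends up with roughly 15 x 3 = 45 spectral features plus its individualID.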
brick = pd.read_csv("./data/features_0411.csv") #"./data/brdf_spectra_2104b.csv")
metadata = pd.read_csv("./data/metadata_0411.csv") #"./data/metadata_2104b.csv")
metadata = metadata[["individualID", "groupID", "plotID","siteID","elevation","latitude", "longitude",
"taxonID"]]
kld_out="./data/tmp_grps.csv"#"./data/kld_grps_2104b.csv"
nbands = brick.shape[0]
brick.iloc[:,1:nbands] = normalize(brick.iloc[:,1:nbands])
brick = kld_reduction(brick, kld_out)
foo = brick.drop(columns=[ 'individualID'])
ele1 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_all_df_vst.csv")
ele2 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_df_vst.csv")
ele3 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR//elevation_sp_3.csv")
ele4 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_sp_2.csv")
ele5 = pd.read_csv("/orange/ewhite/s.marconi/brdf_classification/elevation/elevation_sp_2018.csv")
elevation = ele1.append(ele2, ignore_index=True)
elevation = elevation.append(ele3, ignore_index=True)
elevation = elevation.append(ele4, ignore_index=True)
elevation = elevation.append(ele5, ignore_index=True)
elevation.to_csv("/orange/ewhite/s.marconi/elevation_from_sensor.csv")
elevation = elevation.groupby('individualID').mean()
elevation=elevation[["elevation"]]
elevation.reset_index(inplace=True)
elevation
#metadata = metadata.join(elevation.set_index('individualID'), on='individualID')
metadata = metadata.dropna()
for siteID in domainid.keys():
data = pd.concat([metadata, foo], axis=1)
#data = brick.set_index('individualID').join(metadata.set_index('individualID'))
data.reset_index(inplace=True)
is_bad_genus = ["MAGNO", "AMLA"]
is_bad_genus = data['taxonID'].isin(is_bad_genus)
data = data[~is_bad_genus]
is_bad_site = [siteID] #["KONZ", "ONAQ", "MOAB", "PUUM", "YELL", "GUAN", "BLAN"] #YELL
is_bad_site = data['siteID'].isin(is_bad_site)
data = data[is_bad_site]
ave_coords= data[["latitude", "longitude", "siteID"]].groupby(['siteID'], as_index=False).mean()
data = data.drop(columns=["latitude", "longitude"]).set_index('siteID').join(ave_coords.set_index('siteID'))
data = data.dropna()
data = data.drop(columns=['index', 'plotID'])
species_id = data.taxonID.unique()
    # split into train and test by choosing columns
train = data.groupID == "train"
test = data.groupID == "test"
y_test = data[['individualID','taxonID', 'groupID']][~train]
X_test = data.drop(columns=['individualID', 'taxonID', 'groupID'])[~train]
X_train = data.drop(columns=['individualID', 'taxonID', 'groupID'])[train]
y_train = data[['taxonID']][train]
y = data[['taxonID']]
    # oversample using SMOTENC in order not to lose the categorical effects
    # get relative frequency of each class
ratios_for_each = Counter(y_train.taxonID)
ratios_for_each = pd.DataFrame.from_dict(ratios_for_each, orient='index').reset_index()
#ratios_for_each.iloc[:,1] = ratios_for_each.iloc[:,1]
#remove mean of kld class (redundant feature)
cat_col = X_train.shape[1]
cols = np.arange(start = 2, stop = cat_col-2, step=3)
#X_train.drop(X_train.columns[cols],axis=1,inplace=True)
#X_test.drop(X_test.columns[cols],axis=1,inplace=True)
cat_col = X_train.shape[1]
X_train.columns
#undersample
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import NeighbourhoodCleaningRule
from imblearn.under_sampling import TomekLinks
from imblearn.combine import SMOTETomek
min_class = Counter(y_train.taxonID)
unsuited = pd.DataFrame(min_class.items())
only_one = unsuited.iloc[:,1]<2
unsuited = unsuited[only_one][0]
#unsuited = pd.DataFrame(min_class.items())
doble_unsuited = X_train[y_train.taxonID.isin(unsuited)]+0.00001
X_train=X_train.append(doble_unsuited)
doble_unsuited = y_train[y_train.taxonID.isin(unsuited)]
y_train=y_train.append(doble_unsuited)
min_class = Counter(y_train.taxonID)
min_class = min_class[min(min_class, key=min_class.get)]
smotenc = SMOTENC(random_state=0, categorical_features = [0,cat_col-2,cat_col-1], k_neighbors=min_class-1)
smt = SMOTETomek(random_state=42, smote=smotenc)
X_res, y_res = smt.fit_resample(X_train, y_train)
min_class = Counter(y_train.taxonID)
new_df = | pd.merge(X_res, ave_coords, how='left', left_on=['latitude', 'longitude'], right_on = ['latitude', 'longitude']) | pandas.merge |
#!/usr/bin/env python3
'''
Script to update all the data of a real estate agent database with new data.
Can be run as a script directly or via the use of the function 'update_real_estate_data'
'''
import os
import numpy
import pandas
from load_and_display_database import export_data_frame_to_excel, HOUSE_DATA_FILE_NAME
from data_calls_real_estate_agent import get_arnold_taal_data,\
get_bvl_data, get_langezaal_data, get_elzenaar_data, get_oltshoorn_data, get_estata_data, \
get_nelisse_data, get_doen_data, get_van_aalst_data, \
get_belderbos_data, get_hekking_data, get_klap_makelaars_data, get_diva_makelaars_data,\
get_frisia_makelaars_data, get_oltshoorn_data\
# Only functions which reliably provided data are added to this list.
real_estate_agent_functions = [get_arnold_taal_data, get_oltshoorn_data, get_frisia_makelaars_data,
get_bvl_data, get_langezaal_data,
get_elzenaar_data, get_oltshoorn_data, get_estata_data, get_nelisse_data, get_doen_data,
get_belderbos_data, get_van_aalst_data, get_hekking_data, get_klap_makelaars_data,
get_diva_makelaars_data]
directory = os.path.dirname(os.path.abspath(__file__))
HOUSE_DATA_PATH = os.path.join(directory, HOUSE_DATA_FILE_NAME)
COLUMNS_DATA = ("Address", "Link", "Name real estate agent", "specs checked", "first found", "last found", "available")
def update_real_estate_data(path, new_database=False):
new_house_data = get_current_data_of_all_real_estate_agents()
# Get column name from the end so that extra columns can be added in the middle if needed.
new_house_data[COLUMNS_DATA[-3]] = numpy.nan
new_house_data[COLUMNS_DATA[-2]] = pandas.Timestamp.now()
new_house_data[COLUMNS_DATA[-1]] = True
if not new_database:
old_house_data = | pandas.read_pickle(path) | pandas.read_pickle |
import pandas as pd
import datetime
import dateutil.parser
import Utils
#
# given a synthea object, convert it to its equivalent omop objects
#
class SyntheaToOmop6:
#
# Check the model matches
#
def __init__(self, model_schema, utils):
self.model_schema = model_schema
self.utils = utils
#
# synthea patients to omop
#
def patientsToOmop(self, df, personmap, person_id, location_id):
#df = df.sort_values('Id') sort to get better match to original synthea to omop conversion for comparison
df['persontmp'] = df.index + person_id # copy index into a temp column. If accessed directly corrupts dataframe
df['locationtmp'] = df.index + location_id # copy index into a temp column. If accessed directly corrupts dataframe
person = pd.DataFrame(columns=self.model_schema['person'].keys())
person['person_id'] = df['persontmp']
person['gender_concept_id'] = df['GENDER'].apply(self.utils.getGenderConceptCode)
person['year_of_birth'] = df['BIRTHDATE'].apply(self.utils.getYearFromSyntheaDate)
person['month_of_birth'] = df['BIRTHDATE'].apply(self.utils.getMonthFromSyntheaDate)
person['day_of_birth'] = df['BIRTHDATE'].apply(self.utils.getDayFromSyntheaDate)
person['race_concept_id'] = df['RACE'].apply(self.utils.getRaceConceptCode)
person['ethnicity_concept_id'] = df['ETHNICITY'].apply(self.utils.getEthnicityConceptCode)
person['birth_datetime'] = df['BIRTHDATE'].apply(self.utils.getDefaultTimestamp)
person['death_datetime'] = df['DEATHDATE'].apply(self.utils.getDefaultTimestamp)
person['location_id'] = df['locationtmp']
person['gender_source_value'] = df['GENDER']
person['person_source_value'] = df['Id']
person['gender_source_concept_id'] = '0'
person['race_source_value'] = df['RACE']
person['race_source_concept_id'] = '0'
person['ethnicity_source_value'] = df['ETHNICITY']
person['ethnicity_source_concept_id'] = '0'
personappend = pd.DataFrame(columns=["person_id","synthea_patient_id"])
personappend["person_id"] = person['person_id']
personappend["synthea_patient_id"] = df['Id']
personmap = personmap.append(personappend)
person = person[person['gender_concept_id'] != 0] # filter out person's with missing or unknown gender
location = pd.DataFrame(columns=self.model_schema['location'].keys())
location['location_id'] = df['locationtmp']
location['address_1'] = df['ADDRESS']
location['city'] = df['CITY']
location['state'] = df['STATE']
location['zip'] = df['ZIP']
location['county'] = df['COUNTY']
location['location_source_value'] = df['Id']
location['latitude'] = df['LAT']
location['longitude'] = df['LON']
# create empty death dataframe
death = pd.DataFrame()
return (person, location, death, personmap, person_id + len(person), location_id + len(location))
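    # Bookkeeping sketch for the id offsets above: a call with person_id=1 on a 100-row frame
    # assigns person_id values 1..100 and returns person_id + len(person) so the next chunk can
    # continue the sequence; note that rows with an unknown gender are filtered out *after* the
    # offsets are assigned, so the returned counter only advances by the rows that were kept.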
def conditionsToOmop(self, df, srctostdvm, condition_occurrence_id, drug_exposure_id, observation_id, personmap, visitmap):
df['conditiontmp'] = df.index + condition_occurrence_id # copy index into a temp column.
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df['observationtmp'] = df.index + observation_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
condition_occurrence = pd.DataFrame(columns=self.model_schema['condition_occurrence'].keys())
condition_occurrence['condition_occurrence_id'] = df['conditiontmp']
condition_occurrence['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Condition') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
condition_occurrence['condition_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
condition_occurrence['condition_start_date'] = df['START']
condition_occurrence['condition_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_end_date'] = df['STOP']
condition_occurrence['condition_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_type_concept_id'] = '32020'
condition_occurrence['stop_reason'] = '0'
condition_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
condition_occurrence['visit_detail_id'] = '0'
condition_occurrence['condition_source_value'] = df['CODE']
condition_occurrence['condition_source_concept_id'] = df['CODE']
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='RxNorm') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
drug_exposure['drug_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
drug_exposure['drug_exposure_start_date'] = df['START']
drug_exposure['drug_exposure_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
drug_exposure['drug_exposure_end_date'] = df['STOP']
drug_exposure['drug_exposure_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
drug_exposure['verbatim_end_date'] = df['STOP']
drug_exposure['visit_occurrence_id'] = df['visit_occurrence_id']
drug_exposure['drug_source_value'] = df['CODE']
drug_exposure['drug_source_concept_id'] = df['CODE']
drug_exposure['drug_type_concept_id'] = '581452'
drug_exposure['refills'] = '0'
drug_exposure['quantity'] = '0'
drug_exposure['days_supply'] = '0'
drug_exposure['route_concept_id'] = '0'
drug_exposure['lot_number'] = '0'
drug_exposure['visit_detail_id'] = '0'
observation = pd.DataFrame(columns=self.model_schema['observation'].keys())
observation['observation_id'] = df['observationtmp']
observation['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Observation') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
observation['observation_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
observation['observation_date'] = df['START']
observation['observation_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
observation['value_as_concept_id'] = '0'
observation['qualifier_concept_id'] = '0'
observation['unit_concept_id'] = '0'
observation['visit_occurrence_id'] = df['visit_occurrence_id']
observation['visit_detail_id'] = '0'
observation['observation_source_value'] = df['CODE']
observation['observation_source_concept_id'] = df['CODE']
observation['observation_type_concept_id'] = '38000280'
return (condition_occurrence, drug_exposure, observation, condition_occurrence_id + len(condition_occurrence) , drug_exposure_id + len(drug_exposure), observation_id + len(observation))
def careplansToOmop(self, df):
pass
def observationsToOmop(self, df, srctostdvm, srctosrcvm, measurement_id, personmap, visitmap):
df['measurementtmp'] = df.index + measurement_id # copy index into a temp column.
# filter synthea observations with no encounter (the original ETL does this)
df = df[~df.ENCOUNTER.isnull()]
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
measurement = pd.DataFrame(columns=self.model_schema['measurement'].keys())
measurement['measurement_id'] = df['measurementtmp']
measurement['person_id'] = df['person_id']
measurement['measurement_date'] = df['DATE']
measurement['measurement_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
measurement['measurement_time'] = df['DATE'] # check
measurement['visit_occurrence_id'] = df['visit_occurrence_id']
measurement['visit_detail_id'] = '0'
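# Measurement domain: map source codes to any valid, standard Measurement concept (no vocabulary restriction here).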
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Measurement') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df[['CODE']],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
measurement['measurement_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
measurement['measurement_source_value'] = df['CODE']
measurement['measurement_source_concept_id'] = df['CODE']
measurement['measurement_type_concept_id'] = '5001'
measurement['operator_concept_id'] = '0'
measurement['value_as_number'] = df['VALUE']
measurement['value_as_concept_id'] = '0'
measurement['unit_source_value'] = df['UNITS']
measurement['value_source_value'] = df['VALUE']
return (measurement, measurement_id + len(measurement))
def proceduresToOmop(self, df, srctostdvm, procedure_occurrence_id, personmap, visitmap):
df['proceduretmp'] = df.index + procedure_occurrence_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
# Do procedures really map to measurements? There are no value or units fields.
#measurement = pd.DataFrame(columns=self.model_schema['measurement'].keys())
#measurement['person_id'] = df['PATIENT'].apply(self.patienthash)
#measurement['measurement_date'] = df['DATE']
#measurement['measurement_time'] = df['DATE'] # check
#measurement['value_as_number'] = df['VALUE']
#measurement['visit_occurrence_id'] = df['CODE']
#measurement['measurement_concept_id'] = df['CODE']
#measurement['measurement_type_concept_id'] = '5001'
#measurement['measurement_source_value'] = df['CODE']
#measurement['measurement_source_concept_id'] = df['CODE']
#measurement['unit_source_value'] = df['UNITS']
#measurement['value_source_value'] = df['VALUE']
procedure_occurrence = pd.DataFrame(columns=self.model_schema['procedure_occurrence'].keys())
procedure_occurrence['procedure_occurrence_id'] = df['proceduretmp']
procedure_occurrence['person_id'] = df['person_id']
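# Procedure domain: restrict the mapping to valid, standard SNOMED Procedure concepts.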
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Procedure') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df[['CODE']],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
procedure_occurrence['procedure_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
procedure_occurrence['procedure_date'] = df['DATE']
procedure_occurrence['procedure_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
procedure_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
procedure_occurrence['visit_detail_id'] = '0'
procedure_occurrence['procedure_type_concept_id'] = '38000275'
procedure_occurrence['modifier_concept_id'] = '0'
procedure_occurrence['procedure_source_value'] = df['CODE']
procedure_occurrence['procedure_source_concept_id'] = df['CODE']
return (procedure_occurrence, procedure_occurrence_id + len(procedure_occurrence))
def immunizationsToOmop(self, df, srctostdvm, drug_exposure_id, personmap, visitmap):
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
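# The rest of this method is missing from the source. Below is a minimal,
# hypothetical sketch of how the continuation might look, mirroring the
# drug_exposure construction used for medications above. The CVX vocabulary
# filter, the use of DATE for start/end, and the returned counter are
# assumptions, not confirmed by the original code.
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='CVX') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]  # CVX is an assumption
concept_df = pd.merge(df['CODE'], srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
drug_exposure['drug_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
drug_exposure['drug_exposure_start_date'] = df['DATE']
drug_exposure['drug_exposure_start_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
drug_exposure['drug_exposure_end_date'] = df['DATE']
drug_exposure['drug_exposure_end_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
drug_exposure['verbatim_end_date'] = df['DATE']
drug_exposure['visit_occurrence_id'] = df['visit_occurrence_id']
drug_exposure['drug_source_value'] = df['CODE']
drug_exposure['drug_source_concept_id'] = df['CODE']
drug_exposure['drug_type_concept_id'] = '581452'
drug_exposure['refills'] = '0'
drug_exposure['quantity'] = '0'
drug_exposure['days_supply'] = '0'
drug_exposure['route_concept_id'] = '0'
drug_exposure['lot_number'] = '0'
drug_exposure['visit_detail_id'] = '0'
return (drug_exposure, drug_exposure_id + len(drug_exposure))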
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
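# 0b00101101 sets bits 0, 2, 3 and 5, so rows 0, 2, 3, 5 are valid and rows 1, 4 are null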
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
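# expand_bits_to_bytes yields one 0/1 byte per row, indexed by key below to check validity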
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
# which casts NaN to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason, PyArrow's to_pandas() converts to a numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(
q if isinstance(q, list) else [q], numeric_only=False
),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
# Python's hash can return different values across runs, which sometimes
# causes enc_with_name_arr and enc_arr to be the same. There is no better
# way to make hash return a stable value, so use an integer name to get a
# constant value back from hash.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
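    # For example, reindexing an int64 column onto labels that include new
    # values upcasts it to float64 (NaN fills the new labels), which is why
    # the integer column 'b' is left commented out in the dtypes below.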
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas' sort_index only accepts ignore_index from v1.0, so it is not
    # passed here; the reset_index(drop=True) calls below emulate it.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    # Verify that rounding preserved each column's null mask.
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
    pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
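# For example, _clean_dict({1: "a", 2: "b"}) returns {"1": "a", "2": "b"}, so
# integer keys survive the JSON round-trip as string keys.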
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
        decoded = ujson.decode(encoded)
# -*- coding: utf-8 -*-
# author: <NAME>
# Email: <EMAIL>
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import generators
from __future__ import with_statement
import re
from bs4 import BeautifulSoup
from concurrent import futures
import os
import sys
import traceback
import time
import datetime
import pandas as pd
import requests
import json
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from fake_useragent import UserAgent
from openpyxl import load_workbook
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.header import Header
############ Global variable initialization ##############
HEADERS = dict()
# Number of concurrent threads
NUM_THREADS = None
# City selection
city_dict = {
"成都": "cd",
"北京": "bj",
"上海": "sh",
"广州": "gz",
"深圳": "sz",
"南京": "nj",
"合肥": "hf",
"杭州": "hz",
}
# Whether to print HTTP errors
PRINT = True if ((len(sys.argv) > 1) and (sys.argv[1] == 'true')) else False
# Initialize the fake User-Agent library
ua = UserAgent()
# Do not use a proxy
proxies = None
WORKPATH="/home/frank/workspace/lianjia/data"
CITY = city_dict["北京"]
""" HTTP GET 操作封装 """
def get_bs_obj_from_url(http_url):
done = False
exception_time = 0
HEADERS["User-Agent"] = ua.random
while not done:
try:
if PRINT:
print("正在获取 {}".format(http_url))
r = requests.get(http_url, headers=HEADERS, proxies=proxies, timeout=3)
bs_obj = BeautifulSoup(r.text, "lxml")
done = True
except Exception as e:
if PRINT:
print(e)
exception_time += 1
time.sleep(1)
if exception_time > 10:
return None
return bs_obj
""" 判断一个字符串是否可以转成数字 """
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
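# Create the output directory for scraped data if it does not already exist.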
def esf_mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
print("{} create successfully.".format(path))
return True
else:
print("{} already exist.".format(path))
return False
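# Scrape the district slugs (URL path segments) listed in the ershoufang
# navigation bar of the city's Lianjia homepage.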
def get_district_from_city(city):
print("---get {} districts---".format(city))
city_url = "http://{}.lianjia.com".format(city)
http_url = city_url + "/ershoufang"
bs_obj = get_bs_obj_from_url(http_url)
parent_div = bs_obj.find("div", {"data-role": "ershoufang"})
a_list = parent_div.find_all("a")
district_list = [a.attrs["href"].replace("/ershoufang/", "")[:-1]
for a in a_list
if a.attrs['href'].startswith("/ershoufang")]
print("---total {} districts---".format(len(district_list)))
return district_list
def get_district_name_from_city(city):
print("---get {} districts---".format(city))
city_url = "http://{}.lianjia.com".format(city)
http_url = city_url + "/ershoufang"
bs_obj = get_bs_obj_from_url(http_url)
parent_div = bs_obj.find("div", {"data-role": "ershoufang"})
a_list = parent_div.find_all("a")
name_list = [a.get_text() for a in a_list
if a.attrs['href'].startswith("/ershoufang")]
print("---total {} districts---".format(len(name_list)))
return name_list
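# Collect all second-hand-listing (ershoufang) ids for one district by sweeping
# the price bands p1..p8; narrowing by price keeps each query small enough to
# page through completely.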
def get_esf_from_district(city, district):
http_url = "http://{}.lianjia.com/ershoufang/{}".format(city, district)
bs_obj = get_bs_obj_from_url(http_url)
esf_list = []
try:
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
#try again
try:
bs_obj = get_bs_obj_from_url(http_url)
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
return esf_list
print("---district {} total ershoufang numbers: {}---".format(district, total_esf_num))
if total_esf_num == 0:
print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
return esf_list
for price in range(1, 9):
esf_list_partial = get_esf_id_in_price(city, district, price)
if esf_list_partial is not None and len(esf_list_partial) > 0:
esf_list += esf_list_partial
print("---district {} total get {}/{}---\n".format(district, len(esf_list), total_esf_num))
return esf_list
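# Page through a single price band within a district, collecting listing ids
# concurrently and retrying any pages that failed on the first pass.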
def get_esf_id_in_price(city, district, price):
http_url = "http://{}.lianjia.com/ershoufang/{}/p{}".format(city, district, price)
bs_obj = get_bs_obj_from_url(http_url)
total_esf_num = 0
try:
total_esf_num = int(bs_obj.find("h2", {"class": "total fl"}).find("span").get_text())
except Exception as e:
print(" price {} get error.".format(price))
pass
#print("------price {} total : {}---".format(price, total_esf_num))
esf_list = []
if total_esf_num == 0:
print(" price {} finish---done.".format(price))
return esf_list
try:
page_box = bs_obj.find("div", {"class": "page-box house-lst-page-box"})
total_pages = int(json.loads(page_box.attrs["page-data"])["totalPage"])
except Exception as e:
print(" price {} page get error.".format(price))
return esf_list
with futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
future_list = []
for page_no in range(1, total_pages + 1):
future_list.append(executor.submit(get_esf_id_in_page, city, district, price, page_no))
fail_list = []
count = 0
for future in futures.as_completed(future_list):
page_no, esf_list_partial = future.result()
if esf_list_partial is None or len(esf_list_partial) == 0:
fail_list.append(page_no)
else:
esf_list += esf_list_partial
count += 1
sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
for page_no in fail_list:
_, esf_list_partial = get_esf_id_in_page(city, district, price, page_no)
if esf_list_partial is not None and len(esf_list_partial) > 0:
esf_list += esf_list_partial
count += 1
sys.stdout.write("\r price {} finish {}/{}".format(price, len(esf_list), total_esf_num))
print("---done.")
return esf_list
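# Extract listing ids from one result page; each id is the digit sequence in
# the listing's detail-page URL.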
def get_esf_id_in_page(city, district, price, page_no):
http_url = "http://{}.lianjia.com/ershoufang/{}/pg{}p{}".format(city, district, page_no, price)
bs_obj = get_bs_obj_from_url(http_url)
if bs_obj is None:
print("get ershoufang id, price {} page {} is none".format(price, page_no))
        return page_no, None
parent_list = bs_obj.find_all("li", {"class": "clear"})
esf_list = []
if not (len(parent_list) == 0):
for li in parent_list:
esf_url = str(li.find("div", {"class": "title"}).find("a").attrs["href"])
esf_id = "".join(list(filter(str.isdigit, esf_url)))
esf_list.append(esf_id)
return page_no, esf_list
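# Gather the de-duplicated listing ids for every district of the city.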
def get_esf_of_city(city):
district_list = get_district_from_city(city)
esf_list = []
for district in district_list:
esf_of_district = get_esf_from_district(city, district)
esf_list += esf_of_district
esf_list = sorted(set(esf_list), key=esf_list.index)
return esf_list
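# Parse one listing's detail page into a single-row DataFrame indexed by the
# listing id; an empty frame is returned when the page is missing or cannot be
# parsed.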
def get_esf_info(city, esf_id):
http_url = "https://{}.lianjia.com/ershoufang/{}.html".format(city, esf_id)
bs_obj = get_bs_obj_from_url(http_url)
df = pd.DataFrame()
if bs_obj is not None:
try:
test = bs_obj.find("div", {"class": "icon-404 icon fl"})
if test is not None:
return esf_id, df
total_price = bs_obj.find("span", {"class": "total"}).get_text()
if not is_number(total_price):
return esf_id, df
unit_price = bs_obj.find("div", {"class": "unitPrice"}).get_text().replace("元/平米", "")
huxing = bs_obj.find("div", {"class": "room"}).find("div", {"class": "mainInfo"}).get_text()
xiaoqu = bs_obj.find("div", {"class": "communityName"}).find("a").get_text()
area_info = bs_obj.find("div", {"class": "areaName"}).find_all("a")
chengqu = area_info[0].get_text()
quyu = area_info[1].get_text()
base_info = bs_obj.find("div", {"class": "newwrap baseinform"})
            # Basic attributes
base = base_info.find("div", {"class": "base"}).get_text()
louceng = None if "所在楼层" not in base else base.split("所在楼层")[1].split("(")[0]
zonglouceng = None if "所在楼层" not in base else base.split("(共")[1].split("层")[0]
jianzhumianji = None if "建筑面积" not in base else base.split("建筑面积")[1].split("㎡")[0]
if not is_number(jianzhumianji):
return esf_id, df
huxingjiegou = None if "户型结构" not in base else base.split("户型结构")[1].split("\n")[0]
if "套内面积" not in base:
taoneimianji = None
elif "暂无数据" in base.split("套内面积")[1].split("\n")[0]:
taoneimianji = None
else:
taoneimianji = base.split("套内面积")[1].split("㎡")[0]
jianzhuleixing = None if "建筑类型" not in base else base.split("建筑类型")[1].split("\n")[0]
chaoxiang = None if "房屋朝向" not in base else base.split("房屋朝向")[1].split("\n")[0]
jianzhujiegou = None if "建筑结构" not in base else base.split("建筑结构")[1].split("\n")[0]
zhuangxiu = None if "装修情况" not in base else base.split("装修情况")[1].split("\n")[0]
tihubili = None if "梯户比例" not in base else base.split("梯户比例")[1].split("\n")[0]
gongnuan = None if "供暖方式" not in base else base.split("供暖方式")[1].split("\n")[0]
dianti = None if "配备电梯" not in base else base.split("配备电梯")[1].split("\n")[0]
chanquan = None if "产权年限" not in base else base.split("产权年限")[1].split("\n")[0]
yongshui = "商水" if base_info.find(text="商水") is not None else "民水"
yongdian = "商电" if base_info.find(text="商电") is not None else "民电"
            # Transaction attributes
trans = base_info.find("div", {"class": "transaction"}).get_text()
guapaishijian = None if "挂牌时间" not in trans else trans.split("挂牌时间")[1].strip().split("\n")[0]
jiaoyiquanshu = None if "交易权属" not in trans else trans.split("交易权属")[1].strip().split("\n")[0]
fangwuyongtu = None if "房屋用途" not in trans else trans.split("房屋用途")[1].strip().split("\n")[0]
fangwunianxian = None if "房屋年限" not in trans else trans.split("房屋年限")[1].strip().split("\n")[0]
chanquansuoshu = None if "产权所属" not in trans else trans.split("产权所属")[1].strip().split("\n")[0]
diyaxinxi = None if "抵押信息" not in trans else trans.split("抵押信息")[1].strip().split("\n")[0]
df = pd.DataFrame(index=[esf_id], data=[[http_url, chengqu, quyu, xiaoqu,
huxing, total_price, unit_price, jianzhumianji,
taoneimianji, chaoxiang, louceng, zonglouceng,
huxingjiegou, jianzhuleixing, jianzhujiegou,
fangwuyongtu, jiaoyiquanshu, fangwunianxian,
guapaishijian, zhuangxiu, tihubili, gongnuan,
dianti, chanquan, yongshui, yongdian,
chanquansuoshu, diyaxinxi]],
columns=["URL", "城区", "片区", "小区",
"户型", "总价", "单价", "建筑面积",
"套内面积", "朝向", "楼层", "总楼层",
"户型结构", "建筑类型", "建筑结构",
"房屋用途", "交易权属", "房屋年限",
"挂牌时间", "装修", "梯户比例", "供暖",
"配备电梯", "产权", "用水", "用电",
"产权所属", "抵押信息"])
except Exception as e:
print("[E]: get_esf_info, esf_id =", esf_id, e)
traceback.print_exc()
pass
return esf_id, df
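# Aggregate the per-listing frames for a list of ids into one DataFrame.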
def get_esf_info_from_esf_list(city, esf_list):
    df_esf_info = pd.DataFrame()