Dataset columns: prompt (string, 19 to 1.03M characters), completion (string, 4 to 2.12k characters), api (string, 8 to 90 characters).
import pandas as pd
import matplotlib.pyplot as plt
from alpha_vantage.timeseries import TimeSeries
# Alpha Vantage API key
api = '888888888'
# Collect COVID-19 data from the JHU CSSE repository and market data from Alpha Vantage
deaths = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv")
confirm = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
american_markert = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=IBM&outputsize=full&apikey=' + api + '&datatype=csv')
Canadian_markert = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=SHOP&outputsize=full&apikey=' + api + '&datatype=csv')  # SHOP (Shopify), used below as the Canadian market proxy
Travel_sector = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=LUV&outputsize=full&apikey=' + api + '&datatype=csv')
Real_Estate = pd.read_csv( 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=KIM&outputsize=full&apikey=' + api + '&datatype=csv')
silver = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=SIL&outputsize=full&apikey=' + api + '&datatype=csv')
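# The Alpha Vantage daily endpoint with datatype=csv is expected to return the columns
# timestamp, open, high, low, close and volume; only timestamp, high and low are used below.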
#total confirmed cases
total_c = confirm.sum(axis = 0, skipna = True)
totalc = total_c.to_list()
#total confirmed deaths
total_d = deaths.sum(axis = 0, skipna = True)
totald = total_d.to_list()
#converting date, month, year
timestamp = deaths.columns.to_list()
#removing unnecessary data
del timestamp[0:4]
del totalc[0:3]
del totald[0:3]
#data categorization/classification on covid dataframe
covid_cd = pd.DataFrame(list(zip(timestamp,totalc,totald)), columns = ['Timestamp', 'Total_Confirmed_Cases', 'Total_Deaths'])
covid_cd['Total_Confirmed_Cases'] = covid_cd.Total_Confirmed_Cases.astype('int')
covid_cd['Total_Deaths'] = covid_cd.Total_Deaths.astype('int')
covid_cd['Timestamp'] = pd.to_datetime(covid_cd['Timestamp'])
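# covid_cd now holds one row per date with the cumulative global confirmed cases and deaths.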
#data categorization/classification on american market data
american_markert['timestamp'] = pd.to_datetime(american_markert['timestamp'])
a_mark = american_markert.loc[(american_markert['timestamp'] >= '2020-01-22') & (american_markert['timestamp'] <= '2021-03-06')]
a_market = a_mark.sort_values(by='timestamp',ascending = True)
ibm_ts = a_market['timestamp'].to_list()
ibm_high = a_market['high'].to_list()
ibm_low = a_market['low'].to_list()
IBM_data = pd.DataFrame(list(zip(ibm_ts,ibm_high,ibm_low)), columns = ['Timestamp', 'IBM_High', 'IBM_Low'])
final_1 = covid_cd.merge(IBM_data,how='outer',left_on=['Timestamp'],right_on=["Timestamp"])
final_1['American Market'] = 'IBM'
#data categorization/classification on canadian market data
Canadian_markert['timestamp'] = pd.to_datetime(Canadian_markert['timestamp'])
c_mark = Canadian_markert.loc[(Canadian_markert['timestamp'] >= '2020-01-22') & (Canadian_markert['timestamp'] <= '2021-03-06')]
c_market = c_mark.sort_values(by='timestamp',ascending = True)
shop_ts = c_market['timestamp'].to_list()
shop_high = c_market['high'].to_list()
shop_low = c_market['low'].to_list()
SHOP_data = pd.DataFrame(list(zip(shop_ts,shop_high,shop_low)), columns = ['Timestamp', 'SHOP_High', 'SHOP_Low'])
final_2 = final_1.merge(SHOP_data,how='outer',left_on=['Timestamp'],right_on=["Timestamp"])
final_2['Canadian Market'] = 'SHOP'
#data categorization/classification on travel sector data
Travel_sector['timestamp'] = pd.to_datetime(Travel_sector['timestamp'])
Travel = Travel_sector.loc[(Travel_sector['timestamp'] >= '2020-01-22') & (Travel_sector['timestamp'] <= '2021-03-06')]
Travel = Travel.sort_values(by='timestamp',ascending = True)
LUV_ts = Travel['timestamp'].to_list()
LUV_high = Travel['high'].to_list()
LUV_low = Travel['low'].to_list()
Travel_data = pd.DataFrame(list(zip(LUV_ts,LUV_high,LUV_low)), columns = ['Timestamp', 'LUV_High', 'LUV_Low'])
final_3 = final_2.merge(Travel_data,how='outer',left_on=['Timestamp'],right_on=["Timestamp"])
final_3['Travel'] = 'LUV'
#data categorization/classification on real estate data
Real_Estate['timestamp'] = pd.to_datetime(Real_Estate['timestamp'])
ree = Real_Estate.loc[(Real_Estate['timestamp'] >= '2020-01-22') & (Real_Estate['timestamp'] <= '2021-03-06')]
rea = ree.sort_values(by='timestamp',ascending = True)
r_ts = rea['timestamp'].to_list()
r_high = rea['high'].to_list()
r_low = rea['low'].to_list()
KIM_data = pd.DataFrame(list(zip(r_ts,r_high,r_low)), columns = ['Timestamp','KIM_High', 'KIM_Low'])
final_4 = final_3.merge(KIM_data,how='outer',left_on=['Timestamp'],right_on=["Timestamp"])
final_4['Real Estate'] = 'KIM'
#data categorization/classification on silver metal data
silver['timestamp'] = pd.to_datetime(silver['timestamp'])
from scipy import misc
import numpy as np
import pandas as pd
import cv2
import sys, getopt
def main(argv):
# Getting arguments
inputFile = ''
inputSharpness = 0
try:
opts, args = getopt.getopt(argv, "hi:s:", ["input-file=", "min-sharpness="])
except getopt.GetoptError:
print('bestmoments.py -i <input-file> -s <min-sharpness>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('bestmoments.py -i <input-file> -s <min-sharpness>')
sys.exit()
elif opt in ("-i", "--input-file"):
inputFile = arg
elif opt in ("-s", "--min-sharpness"):
inputSharpness = float(arg)
hcFaces = "haarcascades/haarcascade_frontalface_default.xml"
hcSmiles = "haarcascades/haarcascade_smile.xml"
faceCascade = cv2.CascadeClassifier(hcFaces)
smileCascade = cv2.CascadeClassifier(hcSmiles)
cap = cv2.VideoCapture(inputFile)
if not (cap.isOpened()):
sys.exit()
#cap.set(cv2.CAP_PROP_POS_FRAMES, 800)
bestMoments = []
lastFrameResult = 0
frameCount = 0
while(cap.isOpened()):
ret, frame = cap.read()
if (ret == False):
break
# Skipping frames after some result is found
frameCount += 1
if frameCount < (lastFrameResult + 24):
continue
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor = 1.15,
minNeighbors = 10,
minSize = (100, 100),
flags = cv2.CASCADE_SCALE_IMAGE
)
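# scaleFactor sets the image-pyramid step, minNeighbors the number of overlapping
# detections required to keep a candidate face, and minSize the smallest face in pixels.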
# Highlighting faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
roiGray = gray[y:y+h, x:x+w]
roiGolor = frame[y:y+h, x:x+w]
if(len(faces) > 0):
smile = smileCascade.detectMultiScale(
roiGray,
scaleFactor = 1.15,
minNeighbors = 18,
minSize = (15, 15),
flags = cv2.CASCADE_SCALE_IMAGE
)
# Highlighting smiles
for (x, y, w, h) in smile:
gy, gx = np.gradient(gray)
gnorm = np.sqrt(gx**2 + gy**2)
sharpness = np.average(gnorm)
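# The average gradient magnitude is used as a simple sharpness (blur) score for the frame.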
cv2.rectangle(roiGolor, (x, y), (x+w, y+h), (0, 255, 0), 1)
if(len(smile) > 0):
# Resizing
r = 640.0 / frame.shape[1]
dim = (640, int(frame.shape[0] * r))
resizedFrame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
# Getting frame sharpness
gy, gx = np.gradient(gray)
gnorm = np.sqrt(gx**2 + gy**2)
sharpness = np.average(gnorm)
# Skip frames whose sharpness is below the minimum threshold
if(sharpness > inputSharpness):
bestMoments.append([resizedFrame, len(smile), sharpness])
lastFrameResult = frameCount
# Resizing
r = 640.0 / frame.shape[1]
dim = (640, int(frame.shape[0] * r))
resizedFrame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
# Output
cv2.namedWindow('Output', cv2.WINDOW_NORMAL)
cv2.imshow('Output', resizedFrame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if (bestMoments and len(bestMoments) > 0):
# Order by number of smiles and image sharpness
df = pd.DataFrame(bestMoments, columns=['resized_frame', 'smiles', 'sharpness'])
"""Main class and helper functions.
"""
import os
from enum import Enum
from collections import OrderedDict
from functools import reduce
from pathlib import Path
from typing import Any, Union, Optional
from typing import Iterable, Sized, Sequence, Mapping, MutableMapping
from typing import Tuple, List, Dict, KeysView
from copy import deepcopy
import numpy as np
from numpy import ma
import pandas as pd
from numpy.lib.recfunctions import rec_drop_fields
from pandas.core.index import RangeIndex
from pandas.api.types import is_string_dtype, is_categorical
from scipy import sparse
from scipy.sparse import issparse
from scipy.sparse.sputils import IndexMixin
from natsort import natsorted
# try importing zarr
try:
from zarr.core import Array as ZarrArray
except ImportError:
class ZarrArray:
@staticmethod
def __rep__():
return 'mock zarr.core.Array'
# try importing zappy
try:
from zappy.base import ZappyArray
except ImportError:
class ZappyArray:
@staticmethod
def __rep__():
return 'mock zappy.base.ZappyArray'
from . import h5py
from .layers import AnnDataLayers
from . import utils
from .utils import Index, get_n_items_idx
from .logging import anndata_logger as logger
from .compat import PathLike
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArry = ZarrArray
ZappyArry = ZappyArray
@classmethod
def classes(cls):
print(ZarrArray)
return tuple(c.value for c in cls.__members__.values())
class BoundRecArr(np.recarray):
"""A :class:`numpy.recarray` to which fields can be added using ``.['key']``.
To enable this, it is bound to a instance of AnnData.
"""
_attr_choices = ['obsm', 'varm']
def __new__(cls, input_array: np.ndarray, parent: Any, attr: str):
"""
Parameters
----------
input_array
A (structured) numpy array.
parent
Any object to which the BoundRecArr shall be bound to.
attr
The name of the attribute as which it appears in parent.
"""
arr = np.asarray(input_array).view(cls)
arr._parent = parent
arr._attr = attr
return arr
def __array_finalize__(self, obj: Any):
if obj is None: return
self._parent = getattr(obj, '_parent', None)
self._attr = getattr(obj, '_attr', None)
def __reduce__(self) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
pickled_state = super().__reduce__()
new_state = pickled_state[2] + (self.__dict__, )
return pickled_state[0], pickled_state[1], new_state
def __setstate__(self, state: Sequence[Mapping[str, Any]]):
for k, v in state[-1].items():
self.__setattr__(k, v)
super().__setstate__(state[0:-1])
def copy(self, order='C') -> 'BoundRecArr':
new = super().copy()
new._parent = self._parent
return new
def flipped(self) -> 'BoundRecArr':
new_attr = (self._attr_choices[1] if self._attr == self._attr_choices[0]
else self._attr_choices[0])
return BoundRecArr(self, self._parent, new_attr)
def keys(self) -> Tuple[str, ...]:
return self.dtype.names
def __setitem__(self, key: str, arr: np.ndarray):
if not isinstance(arr, np.ndarray):
raise ValueError(
'Can only assign numpy ndarrays to .{}[{!r}], not objects of class {}'
.format(self._attr, key, type(arr))
)
if arr.ndim == 1:
raise ValueError('Use adata.obs or adata.var for 1-dimensional arrays.')
if self.shape[0] != arr.shape[0]:
raise ValueError(
'Can only assign an array of same length ({}), not of length {}.'
.format(self.shape[0], arr.shape[0])
)
# the following always allocates a new array
# even if the key already exists and dimensions match
# TODO: one could check for this case
# dtype
merged_dtype = []
found_key = False
for descr in self.dtype.descr:
if descr[0] == key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
found_key = True
else:
merged_dtype.append(descr)
if not found_key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
# create new array
new = np.empty(len(self), dtype=merged_dtype)
# fill the array
for name in new.dtype.names:
if name == key:
new[name] = arr
else:
new[name] = self[name]
# make it a BoundRecArr
# TODO: why can we not do this step before filling the array?
new = BoundRecArr(new, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def __delitem__(self, key: str):
"""Delete field with name."""
if key not in self.dtype.names:
raise ValueError(
'Currently, can only delete single names from {}.'
.format(self.dtype.names)
)
new_array = rec_drop_fields(self, key)
new = BoundRecArr(new_array, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
for key in self.keys():
value = self[key]
for icolumn, column in enumerate(value.T):
df['{}{}'.format(key, icolumn+1)] = column
return df
# for backwards compat
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None
# for backwards compat
def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [('{}{:03}of{:03}')
.format(key_multicol, i+1, n_keys) for i in range(n_keys)]
return keys
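# e.g. _gen_keys_from_multicol_key('X_pca', 3) -> ['X_pca001of003', 'X_pca002of003', 'X_pca003of003']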
def df_to_records_fixed_width(df, var_len_str=True):
uns = {} # unstructured dictionary for storing categories
names = ['index']
if is_string_dtype(df.index):
if var_len_str:
index = df.index.values.astype(h5py.special_dtype(vlen=str))
else:
max_len_index = 0 if 0 in df.shape else df.index.map(len).max()
index = df.index.values.astype('S{}'.format(max_len_index))
else:
index = df.index.values
arrays = [index]
for k in df.columns:
names.append(k)
if is_string_dtype(df[k]) and not is_categorical(df[k]):
if var_len_str:
arrays.append(df[k].values.astype(h5py.special_dtype(vlen=str)))
else:
lengths = df[k].map(len)
if is_categorical(lengths): lengths = lengths.cat.as_ordered()
arrays.append(df[k].values.astype('S{}'.format(lengths.max())))
elif is_categorical(df[k]):
uns[k + '_categories'] = df[k].cat.categories
arrays.append(df[k].cat.codes)
else:
arrays.append(df[k].values)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}), uns
def _check_2d_shape(X):
"""Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape)))
def _normalize_index(index, names):
if not isinstance(names, RangeIndex):
assert names.dtype != float and names.dtype != int, \
'Don’t call _normalize_index with non-categorical/string names'
# the following is insanely slow for sequences, we replaced it using pandas below
def name_idx(i):
if isinstance(i, str):
# `where` returns an 1-tuple (1D array) of found indices
i_found = np.where(names == i)[0]
if len(i_found) == 0: # returns array of length 0 if nothing is found
raise IndexError(
'Key "{}" is not valid observation/variable name/index.'
.format(i))
i = i_found[0]
return i
if isinstance(index, slice):
start = name_idx(index.start)
stop = name_idx(index.stop)
# string slices can only be inclusive, so +1 in that case
if isinstance(index.stop, str):
stop = None if stop is None else stop + 1
step = index.step
return slice(start, stop, step)
elif isinstance(index, (int, str)):
return name_idx(index)
elif isinstance(index, (Sequence, np.ndarray, pd.Index)):
# here, we replaced the implementation based on name_idx with this
# incredibly faster one
positions = pd.Series(index=names, data=range(len(names)))
positions = positions[index]
if positions.isnull().values.any():
raise KeyError(
'Indices "{}" contain invalid observation/variables names/indices.'
.format(index))
return positions.values
else:
raise IndexError('Unknown index {!r} of type {}'
.format(index, type(index)))
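# For example (hypothetical names): _normalize_index(['cell_1', 'cell_3'], adata.obs_names)
# returns the positional indices of those observations, e.g. array([0, 2]).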
def _gen_dataframe(anno, length, index_names):
if isinstance(anno, pd.DataFrame):
return anno
if anno is None or len(anno) == 0:
_anno = pd.DataFrame(index=RangeIndex(0, length, name=None).astype(str))
else:
for index_name in index_names:
if index_name in anno:
_anno = pd.DataFrame(
anno, index=anno[index_name],
columns=[k for k in anno.keys() if k != index_name])
break
else:
_anno = pd.DataFrame(anno, index=RangeIndex(0, length, name=None).astype(str))
return _anno
class AnnDataFileManager:
"""Backing file manager for AnnData.
"""
def __init__(
self,
adata: 'AnnData',
filename: Optional[PathLike] = None,
filemode: Optional[str] = None,
):
self._adata = adata
self.filename = filename
self._filemode = filemode
self._file = None
if filename:
self.open()
def __repr__(self) -> str:
if self.filename is None:
return 'Backing file manager: no file is set.'
else:
return 'Backing file manager of file {}.'.format(self.filename)
def __getitem__(self, key: str) -> Union[h5py.Group, h5py.Dataset, h5py.SparseDataset]:
return self._file[key]
def __setitem__(self, key: str, value: Union[h5py.Group, h5py.Dataset, h5py.SparseDataset]):
self._file[key] = value
def __delitem__(self, key: str):
del self._file[key]
@property
def filename(self) -> Path:
return self._filename
@filename.setter
def filename(self, filename: Optional[PathLike]):
self._filename = None if filename is None else Path(filename)
def open(
self,
filename: Optional[PathLike] = None,
filemode: Optional[str] = None,
):
if filename is not None:
self.filename = filename
if filemode is not None:
self._filemode = filemode
if self.filename is None:
raise ValueError(
'Cannot open backing file if backing not initialized.')
self._file = h5py.File(self.filename, self._filemode, force_dense=True)
def close(self):
"""Close the backing file, remember filename, do *not* change to memory mode."""
if self._file is not None:
self._file.close()
def _to_memory_mode(self):
"""Close the backing file, forget filename, *do* change to memory mode."""
self._adata.__X = self._adata.X[()]
self._file.close()
self._file = None
self._filename = None
@property
def isopen(self) -> bool:
"""State of backing file."""
if self._file is None:
return False
# try accessing the id attribute to see if the file is open
return bool(self._file.id)
def _init_actual_AnnData(adata_view):
if adata_view.isbacked:
raise ValueError(
'You cannot modify elements of an AnnData view, '
'but need a copy of the subset.\n\n'
'Call `adata_subset = adata[index].copy(filename=...)`.')
adata_view._init_as_actual(adata_view.copy())
class _SetItemMixin:
def __setitem__(self, idx: Any, value: Any):
if self._view_args is None:
super().__setitem__(idx, value)
else:
adata_view, attr_name = self._view_args
_init_actual_AnnData(adata_view)
getattr(adata_view, attr_name)[idx] = value
class _ViewMixin(_SetItemMixin):
def __init__(self, *args, view_args: Tuple['AnnData', str] = None, **kwargs):
self._view_args = view_args
super().__init__(*args, **kwargs)
class ArrayView(_SetItemMixin, np.ndarray):
def __new__(
cls,
input_array: Sequence[Any],
view_args: Tuple['AnnData', str] = None,
):
arr = np.asarray(input_array).view(cls)
arr._view_args = view_args
return arr
def __array_finalize__(self, obj: Optional[np.ndarray]):
if obj is None: return
self._view_args = getattr(obj, '_view_args', None)
def keys(self) -> KeysView[str]:
# it's a structured array
return self.dtype.names
def copy(self, order: str = 'C') -> np.ndarray:
# we want a conventional array
return np.array(self)
def toarray(self) -> np.ndarray:
return self.copy()
class SparseCSRView(_ViewMixin, sparse.csr_matrix):
pass
class SparseCSCView(_ViewMixin, sparse.csc_matrix):
pass
class DictView(_ViewMixin, dict):
pass
class DataFrameView(_ViewMixin, pd.DataFrame):
_metadata = ['_view_args']
class Raw(IndexMixin):
def __init__(
self,
adata: Optional['AnnData'] = None,
X: Union[np.ndarray, sparse.spmatrix, None] = None,
var: Optional[BoundRecArr] = None,
varm: Optional[BoundRecArr] = None,
):
self._adata = adata
self._n_obs = adata.n_obs
if X is not None:
self._X = X
self._var = var
self._varm = varm
else:
self._X = None if adata.isbacked else adata.X.copy()
self._var = adata.var.copy()
self._varm = adata.varm.copy()
@property
def X(self):
if self._adata.isbacked:
if not self._adata.file.isopen: self._adata.file.open()
X = self._adata.file['raw.X']
if self._adata.isview: return X[self._adata._oidx, self._adata._vidx]
else: return X
else:
if self.n_obs == 1 and self.n_vars == 1:
return self._X[0, 0]
elif self.n_obs == 1 or self.n_vars == 1:
X = self._X
if issparse(self._X): X = self._X.toarray()
return X.flatten()
else:
return self._X
@property
def shape(self):
return self.X.shape
@property
def var(self):
return self._var
@property
def n_vars(self):
return self._var.shape[0]
@property
def n_obs(self):
return self._n_obs
@property
def varm(self):
return self._varm
@property
def var_names(self):
return self.var.index
def __getitem__(self, index):
oidx, vidx = self._normalize_indices(index)
if self._adata is None or not self._adata.isbacked: X = self._X[oidx, vidx]
else: X = self._adata.file['raw.X'][oidx, vidx]
if isinstance(vidx, (int, np.int64)): vidx = slice(vidx, vidx+1, 1)
var = self._var.iloc[vidx]
if self._varm is not None:
varm = self._varm[vidx]
else:
varm = None
return Raw(self._adata, X=X, var=var, varm=varm)
def copy(self):
return Raw(self._adata, X=self._X.copy(), var=self._var.copy(),
varm=None if self._varm is None else self._varm.copy())
def _normalize_indices(self, packed_index):
# deal with slicing with pd.Series
if isinstance(packed_index, pd.Series):
packed_index = packed_index.values
if isinstance(packed_index, tuple):
if len(packed_index) != 2:
raise IndexDimError(len(packed_index))
if isinstance(packed_index[1], pd.Series):
packed_index = packed_index[0], packed_index[1].values
if isinstance(packed_index[0], pd.Series):
packed_index = packed_index[0].values, packed_index[1]
obs, var = super()._unpack_index(packed_index)
obs = _normalize_index(obs, self._adata.obs_names)
var = _normalize_index(var, self.var_names)
return obs, var
INDEX_DIM_ERROR_MSG = 'You tried to slice an AnnData(View) object with an {}-dimensional index, but only 2 dimensions exist in such an object.'
INDEX_DIM_ERROR_MSG_1D = '\nIf you tried to slice cells using adata[cells, ], ' \
'be aware that Python (unlike R) uses adata[cells, :] as slicing syntax.'
class IndexDimError(IndexError):
def __init__(self, n_dims):
msg = INDEX_DIM_ERROR_MSG.format(n_dims)
if n_dims == 1:
msg += INDEX_DIM_ERROR_MSG_1D
super().__init__(msg)
class AnnData(IndexMixin, metaclass=utils.DeprecationMixinMeta):
"""An annotated data matrix.
:class:`~anndata.AnnData` stores a data matrix :attr:`X` together with annotations
of observations :attr:`obs`, variables :attr:`var` and unstructured annotations :attr:`uns`.
.. figure:: https://falexwolf.de/img/scanpy/anndata.svg
:width: 350px
An :class:`~anndata.AnnData` object ``adata`` can be sliced like a pandas
dataframe, for instance, ``adata_subset = adata[:, list_of_variable_names]``.
:class:`~anndata.AnnData`'s basic structure is similar to R's ExpressionSet
[Huber15]_. If setting an ``.h5ad``-formatted HDF5 backing file ``.filename``,
data remains on the disk but is automatically loaded into memory if needed.
See this `blog post`_ for more details.
.. _blog post: http://falexwolf.de/blog/171223_AnnData_indexing_views_HDF5-backing/
Parameters
----------
X
A #observations × #variables data matrix. A view of the data is used if the
data type matches, otherwise, a copy is made.
obs
Key-indexed one-dimensional observations annotation of length #observations.
var
Key-indexed one-dimensional variables annotation of length #variables.
uns
Key-index unstructured annotation.
obsm
Key-indexed multi-dimensional observations annotation of length #observations.
If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype.
varm
Key-indexed multi-dimensional variables annotation of length #variables.
If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype.
dtype
Data type used for storage.
shape
Shape tuple (#observations, #variables). Can only be provided if ``X`` is ``None``.
filename
Name of backing file. See :class:`anndata.h5py.File`.
filemode
Open mode of backing file. See :class:`anndata.h5py.File`.
layers
Dictionary with keys as layers' names and values as matrices of the same dimensions as X.
See Also
--------
read_h5ad
read_csv
read_excel
read_hdf
read_loom
read_zarr
read_mtx
read_text
read_umi_tools
Notes
-----
Multi-dimensional annotations are stored in :attr:`obsm` and :attr:`varm`.
Indexing into an AnnData object with a numeric is supposed to be positional,
like pandas’ :attr:`~pandas.DataFrame.iloc` accessor, while indexing with a string/categorical is
supposed to behave like :attr:`~pandas.DataFrame.loc`.
If the unstructured annotations :attr:`uns` contain a sparse matrix of shape
:attr:`n_obs` × :attr:`n_obs`, these are sliced when calling ``[]``.
A data matrix is flattened if either :attr:`n_obs` or :attr:`n_vars` is 1, so that
numpy's slicing behavior is reproduced::
adata = AnnData(np.ones((2, 2)))
adata[:, 0].X == adata.X[:, 0]
:class:`~anndata.AnnData` stores observations (samples) of variables
(features) in the rows of a matrix. This is the convention of the modern
classics of statistics [Hastie09]_ and machine learning [Murphy12]_, the
convention of dataframes both in R and Python and the established statistics
and machine learning packages in Python (statsmodels_, scikit-learn_).
.. _statsmodels: http://www.statsmodels.org/stable/index.html
.. _scikit-learn: http://scikit-learn.org/
"""
_BACKED_ATTRS = ['X', 'raw.X']
# backwards compat
_H5_ALIASES = {
'X': {'X', '_X', 'data', '_data'},
'obs': {'obs', '_obs', 'smp', '_smp'},
'var': {'var', '_var'},
'uns': {'uns'},
'obsm': {'obsm', '_obsm', 'smpm', '_smpm'},
'varm': {'varm', '_varm'},
'layers': {'layers', '_layers'},
}
_H5_ALIASES_NAMES = {
'obs': {'obs_names', 'smp_names', 'row_names', 'index'},
'var': {'var_names', 'col_names', 'index'},
}
def __init__(
self,
X: Optional[Union[np.ndarray, sparse.spmatrix, pd.DataFrame]] = None,
obs: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None,
var: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None,
uns: Optional[Mapping[str, Any]] = None,
obsm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
varm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None,
layers: Optional[Mapping[str, Union[np.ndarray, sparse.spmatrix]]] = None,
raw: Optional[Raw] = None,
dtype: Union[np.dtype, str] = 'float32',
shape: Optional[Tuple[int, int]] = None,
filename: Optional[PathLike] = None,
filemode: Optional[str] = None,
asview: bool = False,
*, oidx: Index = None, vidx: Index = None):
if asview:
if not isinstance(X, AnnData):
raise ValueError('`X` has to be an AnnData object.')
self._init_as_view(X, oidx, vidx)
else:
self._init_as_actual(
X=X, obs=obs, var=var, uns=uns,
obsm=obsm, varm=varm, raw=raw,
layers=layers,
dtype=dtype, shape=shape,
filename=filename, filemode=filemode)
def _init_as_view(self, adata_ref: 'AnnData', oidx: Index, vidx: Index):
if adata_ref.isbacked and adata_ref.isview:
raise ValueError(
'Currently, you cannot index repeatedly into a backed AnnData, '
'that is, you cannot make a view of a view.')
self._isview = True
self._adata_ref = adata_ref
self._oidx = oidx
self._vidx = vidx
# the file is the same as of the reference object
self.file = adata_ref.file
# views on attributes of adata_ref
oidx_normalized, vidx_normalized = oidx, vidx
if isinstance(oidx, (int, np.int64)): oidx_normalized = slice(oidx, oidx+1, 1)
if isinstance(vidx, (int, np.int64)): vidx_normalized = slice(vidx, vidx+1, 1)
obs_sub = adata_ref.obs.iloc[oidx_normalized]
var_sub = adata_ref.var.iloc[vidx_normalized]
self._obsm = ArrayView(adata_ref.obsm[oidx_normalized], view_args=(self, 'obsm'))
self._varm = ArrayView(adata_ref.varm[vidx_normalized], view_args=(self, 'varm'))
# hackish solution here, no copy should be necessary
uns_new = deepcopy(self._adata_ref._uns)
# need to do the slicing before setting the updated self._n_obs, self._n_vars
self._n_obs = self._adata_ref.n_obs # use the original n_obs here
self._slice_uns_sparse_matrices_inplace(uns_new, self._oidx)
# fix _n_obs, _n_vars
if isinstance(oidx, slice):
self._n_obs = get_n_items_idx(obs_sub.index, adata_ref.n_obs)
elif isinstance(oidx, (int, np.int64)):
self._n_obs = 1
elif isinstance(oidx, Sized):
self._n_obs = get_n_items_idx(oidx, adata_ref.n_obs)
else:
raise KeyError('Unknown Index type')
if isinstance(vidx, slice):
self._n_vars = get_n_items_idx(var_sub.index, adata_ref.n_vars)
elif isinstance(vidx, (int, np.int64)):
self._n_vars = 1
elif isinstance(vidx, Sized):
self._n_vars = get_n_items_idx(vidx, adata_ref.n_vars)
else:
raise KeyError('Unknown Index type')
# fix categories
self._remove_unused_categories(adata_ref.obs, obs_sub, uns_new)
self._remove_unused_categories(adata_ref.var, var_sub, uns_new)
# set attributes
self._obs = DataFrameView(obs_sub, view_args=(self, 'obs'))
self._var = DataFrameView(var_sub, view_args=(self, 'var'))
self._uns = DictView(uns_new, view_args=(self, 'uns'))
# set data
if self.isbacked:
self._X = None
else:
self._init_X_as_view()
self._layers = AnnDataLayers(self, adata_ref=adata_ref, oidx=oidx, vidx=vidx)
# set raw, easy, as it's immutable anyways...
if adata_ref._raw is not None:
# slicing along variables axis is ignored
self._raw = adata_ref.raw[oidx]
else:
self._raw = None
def _init_X_as_view(self):
if self._adata_ref.X is None:
self._X = None
return
X = self._adata_ref._X[self._oidx, self._vidx]
if isinstance(X, sparse.csr_matrix):
self._X = SparseCSRView(X, view_args=(self, 'X'))
elif isinstance(X, sparse.csc_matrix):
self._X = SparseCSCView(X, view_args=(self, 'X'))
elif issparse(X):
raise ValueError('View on non-csr/csc sparse matrices not implemented.')
elif isinstance(X, ZappyArray): # ZappyArray acts as a view itself
self._X = X
else:
shape = (
get_n_items_idx(self._oidx, self._adata_ref.n_obs),
get_n_items_idx(self._vidx, self._adata_ref.n_vars)
)
if np.isscalar(X):
X = X.view()
self._X = ArrayView(X.reshape(shape), view_args=(self, 'X'))
def _init_as_actual(
self, X=None, obs=None, var=None, uns=None,
obsm=None, varm=None, raw=None, layers=None,
dtype='float32', shape=None,
filename=None, filemode=None):
from .readwrite.read import _read_args_from_h5ad
# view attributes
self._isview = False
self._adata_ref = None
self._oidx = None
self._vidx = None
# ----------------------------------------------------------------------
# various ways of initializing the data
# ----------------------------------------------------------------------
# init from file
if filename is not None:
if any((X, obs, var, uns, obsm, varm)):
raise ValueError(
'If initializing from `filename`, '
'no further arguments may be passed.')
self.file = AnnDataFileManager(self, filename, filemode)
X, obs, var, uns, obsm, varm, layers, raw = _read_args_from_h5ad(self, mode=filemode)
if X is not None:
# this is not a function that a user would use, hence it's fine to set the dtype
dtype = X.dtype.name
else:
self.file = AnnDataFileManager(self, None)
# init from AnnData
if isinstance(X, AnnData):
if any((obs, var, uns, obsm, varm)):
raise ValueError(
'If `X` is a dict no further arguments must be provided.')
X, obs, var, uns, obsm, varm, layers, raw = X._X, X.obs, X.var, X.uns, X.obsm, X.varm, X.layers, X.raw
# init from DataFrame
elif isinstance(X, pd.DataFrame):
obs = pd.DataFrame(index=X.index)
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from pandas import (
Series,
DataFrame,
date_range,
datetime,
Panel
)
from pandas.util.testing import (assert_frame_equal,
assert_series_equal)
from pyfolio.capacity import (days_to_liquidate_positions,
get_max_days_to_liquidate_by_ticker,
get_low_liquidity_transactions,
daily_txns_with_bar_data,
apply_slippage_penalty)
class CapacityTestCase(TestCase):
dates = date_range(start='2015-01-01', freq='D', periods=3)
positions = DataFrame([[1.0, 3.0, 0.0],
[0.0, 1.0, 1.0],
[3.0, 0.0, 1.0]],
columns=['A', 'B', 'cash'], index=dates)
transactions = DataFrame(data=[[1, 100000, 10, 'A']] * len(dates),
columns=['sid', 'amount', 'price', 'symbol'],
index=dates)
volume = DataFrame([[1.0, 3.0],
[2.0, 2.0],
[3.0, 1.0]],
columns=['A', 'B'], index=dates)
volume = volume * 1000000
price = DataFrame([[1.0, 1.0]] * len(dates),
columns=['A', 'B'], index=dates)
market_data = Panel({'volume': volume, 'price': price})
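# The Panel bundles the per-ticker volume and price frames under the keys 'volume' and
# 'price', the layout consumed by the capacity functions under test.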
def test_days_to_liquidate_positions(self):
dtlp = days_to_liquidate_positions(self.positions,
self.market_data,
max_bar_consumption=1,
capital_base=1e6,
mean_volume_window=1)
expected = DataFrame([[0.0, .5/3],
[0.75/2, 0.0]],
columns=['A', 'B'],
index=self.dates[1:])
assert_frame_equal(dtlp, expected)
# import module and libraries
import sys
sys.path.append('../')
from text_classification import text_classification as tc # noqa: E402
# ignoring E402 because need import sys and sys.path to access submodule
import pandas as pd # noqa: E402
import unittest # noqa: E402
from sklearn.feature_extraction.text import CountVectorizer # noqa: E402
from sklearn.naive_bayes import MultinomialNB # noqa: E402
class MyTestClassification(unittest.TestCase):
def setUp(self):
print('START Test ...')
# ...............................................................................................
def test_split_test_train_dataset(self):
"""
Test the split_test_train_dataset function
- Call the function and validate the return value from the function
- If the function is working correctly, it will return the
sub-datasets
- The sub datasets size will be greater than 0
- If the sub datasets are 0 (aka. empty), and exception is raise
to fail the test
"""
print('START Testing.....')
print()
value_x_train = 0
value_x_test = 0
value_yelp_test = 0
res_health = pd.read_csv("sample_nutritionix_data.csv")
yelp = pd.read_csv("sample_yelp_data.csv")
# coding: utf-8
"""
Classifiers.
Based on sklearn doc:
"http://scikit-learn.org/dev/developers/contributing.html\
#rolling-your-own-estimator"
"""
from itertools import product
import numpy as np
import pandas as pd
from scipy.optimize import LinearConstraint, minimize
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_is_fitted
from .methods import KernelMethod
def multiclass2one_vs_all(labels, first_class=1):
"""Transform multiclas label to 2 class labels
Params:
labels (array-like): list of labels
first_class: label considered as not the rest
Returns:
(list) list of labels containing only 1/-1
"""
if first_class not in labels:
first_class = labels[0]
return [1 if elt == first_class else -1 for elt in labels]
class SVDD(BaseEstimator, ClassifierMixin, KernelMethod):
"""Implement Support Vector DataDescription
.. math::
\\begin{cases}
\\min_{r, c} & r^2 + C \\sum_i \\xi_i \\\\
s.t. & y_i \\| \\phi(x_i) - c \\|^2 \\leq r^2 + \\xi_i \\quad \\forall i \\\\
& \\xi_i \\geq 0 \\quad \\forall i \\\\
\\end{cases}
"""
def __init__(self, kernel_matrix=None, kernel=None, C=1):
"""Initialize some parameters.
Those parameters may be overwritten by the fit() method.
"""
self.kernel_matrix = kernel_matrix # kernel matrix used for training
if kernel is None:
self.kernel = np.dot
else:
self.kernel = kernel
self.C = C
self.string_labels = False # are labels strings or int?
self.hypersphere_nb = 1
self.trained_on_sample = True # use directly kernel matrix or sample?
def fit(self, X, y=None, C=None, kernel=None, is_kernel_matrix=False):
"""Fit the classifier.
Args:
X: training samples.
y: training labels. If None, consider all samples belongs to the
same class (labeled "1").
C (numeric): contraint in the soft margin case. If None or zero,
then fall back to hard margin case.
kernel (fun): kernel method to use. (default: linear)
is_kernel_matrix (bool): if True, the input is treated as
a kernel matrix.
"""
# X, y = check_X_y(X, y) # TODO: add check method for X
self._classifier_checks(X, y, C, kernel, is_kernel_matrix)
if len(self.classes_) > 2 or (
len(self.classes_) == 2 and self.string_labels
):
# each class has its own hypersphere (one class vs rest)
self.hypersphere_nb = len(self.classes_)
self.individual_svdd = {}
for cl in self.classes_:
# TODO: multithread/asyncio
cl_svdd = SVDD(
kernel_matrix=self.kernel_matrix,
kernel=self.kernel,
C=self.C,
)
cl_y = [1 if elt == cl else -1 for elt in y]
cl_svdd.fit(X, cl_y, C, kernel, is_kernel_matrix)
self.individual_svdd[cl] = cl_svdd
self.y_ = y
self.alphas_ = np.array([0])
self.radius_ = 0
else:
# one hypersphere
self.y_ = np.sign(y)
self.radius_, self.alphas_ = self._fit_one_hypersphere()
return self
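# Example usage (hypothetical data, for illustration only):
#   svdd = SVDD(kernel=np.dot, C=1.0)
#   svdd.fit(X_train, y_train)
#   labels = svdd.predict(X_test)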
def predict(self, X, decision_radius=1):
"""Predict classes
Args:
X (array like): list of test samples.
decision_radius (numeric): modification of decision radius.
The frontier between classes will be the computed hypersphere
whose radius is multiply by this factor.
"""
check_is_fitted(self, ["X_", "alphas_"])
# X = check_array(X)
if self.hypersphere_nb == 1:
return self._predict_one_hypersphere(X, decision_radius)
else:
# check class
dist_classes = self.relative_dist_all_centers(X)
return np.array(dist_classes.idxmin(axis=1))
def fit_predict(self, X, y, C=None, kernel=None, is_kernel_matrix=False):
"""Fit as the fit() methods.
Returns:
(array) : class for each training sample.
"""
self.fit(X, y, C, kernel, is_kernel_matrix)
self.predict(X)
def _predict_one_hypersphere(self, X=None, decision_radius=1):
"""Compute results for one hypersphere
Args:
decision_radius (numeric): modification of decision radius.
The frontier between classes will be the computed hypersphere whose
radius is multiply by this factor.
Returns:
(np.array)
"""
pred = self._dist_center(X) * decision_radius / self.radius_ - 1
ret = np.sign(pred).reshape(-1)
return list(map(lambda x: 1 if x == 0 else x, ret))
def decision_function(self, X):
"""Generic decision value.
Args:
X (array-like): list of sample
"""
return self._dist_center(X) / self.radius_
def _dist_center(self, X=None):
"""Compute ditance to class center.
Args:
X (array-like): list of input vectors. If None, use the train set.
Distance to center:
.. math::
\\| z - c \\|^2 = \\|z\\|^2 - 2 K(z, c) + \\|c\\|^2
c = \\sum_t \\alpha_t \\phi(X_t)
"""
if not self.hypersphere_nb == 1:
raise RuntimeWarning("Not available for multiclass SVDD")
check_is_fitted(self, ["X_", "alphas_"])
dim = len(self.alphas_)
if X is None:
# return distances for training set
square_dists = [
self.kernel_matrix[i, i]
- 2
* sum(
self.alphas_[t] * self.kernel_matrix[i, t]
for t in range(dim)
)
+ sum(
self.alphas_[t]
* self.alphas_[s]
* self.kernel_matrix[s, t]
for s in range(dim)
for t in range(dim)
)
for i in range(dim)
]
else:
# return distances for vector X
square_dists = [
self.kernel(z, z)
- 2
* sum(
self.alphas_[t] * self.kernel(self.X_[t], z)
for t in range(dim)
)
+ sum(
self.alphas_[s]
* self.alphas_[t]
* self.kernel(self.X_[t], self.X_[s])
for s in range(dim)
for t in range(dim)
)
for z in X
]
return np.sqrt(square_dists)
def _fit_one_hypersphere(self, y=None, class1=1, class2=-1):
"""Perform actual fit process
* compute alphas
* compute support vectors
* recompute minimal kernel matrix
"""
if y is None:
y = self.y_
dim = len(self.X_)
alphas = [1 / dim] * dim
C = self.C
upper = C * np.ones(dim)
one = np.array([1])
# TODO: test other solver
# https://pypi.org/project/quadprog/
# http://cvxopt.org/r
def ell_d(al):
"""Dual function to minimize.
function to maximize:
.. maths::
L_D = \\alpha diag(K)^T - \\alpha K \\alpha^T
L_D = \\sum_s \\alpha_s K<x_s, x_s>
- \\sum_s \\sum_t \\alpha_s \\alpha_t K(x_s, x_t)
"""
ay = al * y
return -(
np.mat(ay).dot(np.diag(self.kernel_matrix))
- np.mat(ay).dot(self.kernel_matrix).dot(np.mat(ay).T)
)
cons = [
# \forall i 0 \leq \alpha[i] \leq C
LinearConstraint(A=np.identity(dim), lb=np.zeros(dim), ub=upper),
# \sum_i \alpha[i] = 1
LinearConstraint(A=np.ones(dim), lb=one, ub=one),
]
# TODO: asyncio
predicted_alphas = minimize(
ell_d, alphas, constraints=cons, options={"maxiter": 10000}
)
if not predicted_alphas.success:
raise RuntimeError(predicted_alphas.message)
alphas = predicted_alphas.x
# nullify almost null alphas:
alphas = list(map(lambda x: 0 if np.isclose(x, 0) else x, alphas))
# support vectors: 0 < alphas <= C
support_vectors = set.intersection(
set(np.where(np.less_equal(alphas, C))[0]),
set(np.nonzero(alphas)[0]),
)
self.support_vectors_ = self.support_vectors_.union(support_vectors)
if len(self.support_vectors_) < 2:
radius = np.min(
self.distance_matrix() + np.diag([C for _ in range(dim)])
)
else:
# mean distance to support vectors
radius = np.mean(
[
self.dist_center_training_sample(r, alphas)
for r in self.support_vectors_
]
)
return radius, np.array(alphas)
def dist_all_centers(self, X=None):
"""Return distance to each class center.
"""
if self.hypersphere_nb > 1:
dist_classes = {
cl: svdd._dist_center(X)
for cl, svdd in self.individual_svdd.items()
}
else:
dist_classes = {1: self._dist_center(X)}
return pd.DataFrame(dist_classes)
## License: ?
## Copyright(c) <NAME>. All Rights Reserved.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.
import cmath
import math
import os
from utils import calculateAngle2d, calculateAngle3d, calculateAngleFromSlope, direction_string_generator, forwards_string_generator, is_reach_out_left, is_reach_out_right, save_positional_to_csv, sway_string_generator
import cv2
import numpy as np
import pyrealsense2 as rs
from cubemos.skeletontracking.core_wrapper import CM_TargetComputeDevice  # refer to cubemos documentation for installation
from cubemos.skeletontracking.native_wrapper import Api  # refer to cubemos documentation for installation
import socket
import pandas as pd
joints = ['Nose','Neck','Right_shoulder','Right_elbow','Right_wrist','Left_shoulder',
'Left_elbow','Left_wrist','Right_hip','Right_knee','Right_ankle','Left_hip',
'Left_knee','Left_ankle','Right_eye','Left_eye','Right_ear','Left_ear']
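# The joint order is assumed to match the 18-keypoint skeleton returned by the cubemos tracker.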
prev_joint_3d_coords ={key: (0,0,0) for key in joints}
prev_joint_locations = {key: (0,0) for key in joints}
prev_joint_distances = {key: 0 for key in joints}
distance_data2d = []
position_data2d = []
position_data3d = []
UDP_IP = "192.168.100.202" #CIT Lab fancy computer on the right side from the entrance when one faces towards the room 192.168.164.170
UDP_PORT = 5065
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
prev_angle = 0.0
right_count = 0
left_count = 0
time_on_left = 0
time_on_right = 0
angle_data = [[0,0]]
right_limit = 85
right_limit_extreme = 80
left_limit = 95
left_limit_extreme = 100
forward_limit = 170
forward_limit_extreme = 165
reverse_limit = 185
direction = ""
sideways = ""
forwards = ""
reachout_right = False
reachout_left = False
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 15)
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 15)
#config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, 200)
#To save video
#out = cv2.VideoWriter('skeleton_coordinates.mp4', 0x7634706d, 15.0, (1280, 720))
##########################################################################################################################
def default_license_dir():
return os.path.join(os.environ["HOME"], ".cubemos", "skeleton_tracking", "license") #"LOCALAPPDATA" in place of "HOME" for windows 10
##########################################################################################################################
api = Api(default_license_dir())
sdk_path = os.environ["CUBEMOS_SKEL_SDK"]
model_path = os.path.join(sdk_path, "models", "skeleton-tracking", "fp32", "skeleton-tracking.cubemos")
api.load_model(CM_TargetComputeDevice.CM_CPU, model_path)
profile = pipeline.start(config)
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
colorizer = rs.colorizer()
##########################################################################################################################
def get_valid_coordinates(skeleton, depth, confidence_threshold):
result_coordinate = {}
result_distance = {}
for i in range (len(skeleton.joints)):
if skeleton.confidences[i] >= confidence_threshold:
if skeleton.joints[i][0] >= 0 and skeleton.joints[i][1] >= 0:
result_coordinate[joints[i]] = tuple(map(int, skeleton.joints[i]))
dist,_,_,_ = cv2.mean((depth[result_coordinate[joints[i]][1]-3:result_coordinate[joints[i]][1]+3,result_coordinate[joints[i]][0]-3:result_coordinate[joints[i]][0]+3].astype(float))*depth_scale)
result_distance[joints[i]] = dist
return result_coordinate,result_distance
##########################################################################################################################
def convert_depth_to_phys_coord_using_realsense(intrin,x, y, depth):
result = rs.rs2_deproject_pixel_to_point(intrin, [x, y], depth)
#result[0]: right (x), result[1]: down (y), result[2]: forward (z) from camera POV
return result[0], result[1], result[2]
##########################################################################################################################
def render_result(skeletons, color_img, depth_img, intr, confidence_threshold):
global direction
global sideways
global forwards
global prev_angle
global right_count
global left_count
neck = (0,0)
x_neck,y_neck,z_neck = 0,0,0
mid_hip = (0,0)
sway_angle = 90
forward_angle = 0
right_hip,left_hip = (0,0),(0,0)
x_mid_hip,y_mid_hip,z_mid_hip = 0,0,0
skeleton_color = (0, 140, 255)
if len(skeletons) == 1:
for index, skeleton in enumerate(skeletons):
joint_locations,joint_distances = get_valid_coordinates(skeleton, depth_img, confidence_threshold)
joint_3d_coords = {key: (0,0,0) for key in joints}
joint_2d_coords = {key: (0,0) for key in joints}
joint_2d_distances = {key: 0 for key in joints}
for joint,coordinate in joint_locations.items():
cv2.circle(color_img, coordinate, radius=5, color=skeleton_color, thickness=-1)
joint_3d_coords[joint] = convert_depth_to_phys_coord_using_realsense(intr, coordinate[0], coordinate[1], joint_distances[joint])
joint_2d_coords[joint] = joint_locations[joint]
joint_2d_distances[joint] = joint_distances[joint]
for joint, coordinate in joint_3d_coords.items():
if coordinate == (0,0,0):
joint_2d_coords[joint] = prev_joint_locations[joint]
joint_2d_distances[joint] = prev_joint_distances[joint]
joint_3d_coords[joint] = prev_joint_3d_coords[joint]
else:
prev_joint_locations[joint] = joint_2d_coords[joint]
prev_joint_distances[joint] = joint_2d_distances[joint]
prev_joint_3d_coords[joint] = joint_3d_coords[joint]
rowtowrite = [j for i,j in joint_3d_coords.items()]
rowtowrite2 = [j for i,j in joint_2d_coords.items()]
rowtowrite3 = [j for i,j in joint_2d_distances.items()]
position_data3d.append(rowtowrite)
position_data2d.append(rowtowrite2)
distance_data2d.append(rowtowrite3)
if 'Neck' in joint_locations:
neck = joint_locations['Neck']
(x_neck, y_neck, z_neck) = joint_3d_coords['Neck']
if 'Left_hip' in joint_locations:
left_hip = joint_locations['Left_hip']
if 'Right_hip' in joint_locations:
right_hip = joint_locations['Right_hip']
mid_hip = (math.ceil((left_hip[0]+right_hip[0])/2),math.ceil((left_hip[1]+right_hip[1])/2))
distance,_,_,_ = cv2.mean((depth_img[mid_hip[1]-3:mid_hip[1]+3,mid_hip[0]-3:mid_hip[0]+3].astype(float))*depth_scale)
mid_hip3d = x_mid_hip,y_mid_hip,z_mid_hip = convert_depth_to_phys_coord_using_realsense(intr, neck[0], mid_hip[1], distance)
sway_angle = calculateAngleFromSlope(neck, mid_hip)
forwards = ""
sideways = sway_string_generator(sway_angle, right_limit, left_limit, right_limit_extreme, left_limit_extreme)
forward_angle = calculateAngle3d(joint_3d_coords['Neck'], mid_hip3d, (x_mid_hip,(y_mid_hip+0.25),z_mid_hip))
forwards = forwards_string_generator(forward_angle, forward_limit, forward_limit_extreme, reverse_limit)
right_angle = calculateAngle2d(joint_2d_coords['Right_elbow'], joint_2d_coords['Right_shoulder'], joint_2d_coords['Right_hip'])
left_angle = calculateAngle2d(joint_2d_coords['Left_elbow'], joint_2d_coords['Left_shoulder'], joint_2d_coords['Left_hip'])
reachout_right = is_reach_out_right(joint_2d_coords)
reachout_left = is_reach_out_left(joint_2d_coords)
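# prev_angle stores the signed lean (90 - sway_angle) from the previous frame;
# a sign flip beyond the right/left limits counts as a sway crossing.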
if(prev_angle<0 and (90-sway_angle)>0 and sway_angle<right_limit):
right_count = right_count +1
elif(prev_angle>0 and (90-sway_angle)<0 and sway_angle>left_limit):
left_count = left_count + 1
if(prev_angle == 0 and (90-sway_angle)<0):
prev_angle = 90-sway_angle
elif(prev_angle == 0 and (90-sway_angle)>0):
prev_angle = 90-sway_angle
elif((90-sway_angle)<0):
prev_angle = 90-sway_angle
elif((90-sway_angle)>0):
prev_angle = 90-sway_angle
angle_data.append([180-forward_angle,90-sway_angle])
# cv2.putText(color_img,"forward_angle={0:.6}".format(forward_angle),(850,25), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,cv2.LINE_AA)
# cv2.putText(color_img,"sway_angle={0:.6}".format(sway_angle),(50,25), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,cv2.LINE_AA)
cv2.putText(color_img,"right_angle={0:.6}".format(right_angle),(850,25), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,cv2.LINE_AA)
cv2.putText(color_img,"left_angle={0:.6}".format(left_angle),(50,25), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,cv2.LINE_AA)
if reachout_right:
cv2.putText(color_img,"Reach Out(Right)",(50,250), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,cv2.LINE_AA)
if reachout_left:
cv2.putText(color_img,"Reach Out(Left)",(350,250), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,cv2.LINE_AA)
direction = direction_string_generator(forwards, sideways)
message = direction
sock.sendto((message).encode(), (UDP_IP, UDP_PORT))
#print(message)
cv2.imshow('Skeleton', color_img)
else:
cv2.imshow('Skeleton', color_img)
##########################################################################################################################
while True:
frame = pipeline.wait_for_frames()
align = rs.align(rs.stream.color)
aligned_frame = align.process(frame)
depth_frame = aligned_frame.get_depth_frame()
color_frame = aligned_frame.get_color_frame()
prof = depth_frame.get_profile()
video_prof = prof.as_video_stream_profile()
intrinsics = video_prof.get_intrinsics()
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
#color_imgmap = np.asanyarray(colorizer.colorize(depth_frame).get_data())
skeletons = api.estimate_keypoints(color_image, 256)
render_result(skeletons, color_image, depth_image, intrinsics, 0.6)
cv2.namedWindow('Skeleton', cv2.WINDOW_AUTOSIZE)
key = cv2.waitKey(1)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyWindow('Skeleton')
break
pipeline.stop()
save_positional_to_csv(position_data3d, "coordinates3d", position_data2d, "coordinates2d", distance_data2d, "distances2d")
df = pd.DataFrame(angle_data, columns=['Forward angle', 'Sway_angle'])
# Description: An inefficient script that scrubs unwanted streams and variables and reassigns node names to simplified names.
#Author: iblack
#Last updated: 2020-05-06
import os
import requests
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
os.chdir(r'')
master = pd.read_csv(r'')
user = '' #OOI API user for <EMAIL>
token = '' #OOI API token for <EMAIL>
base_url = 'https://ooinet.oceanobservatories.org/api/m2m/' # Base M2M URL.
deploy_url = '12587/events/deployment/inv/' # Deployment information.
sensor_url = '12576/sensor/inv/' # Sensor information.
anno_url = '12580/anno/' # Annotations information.
stream_url = '12575/stream/' # Streams information.
#Request available streams from the OOI API.
r = requests.get(base_url + stream_url,auth = (user,token)).json() #Request all OOI streams and throw it into a JSON object.
streams = json_normalize(r) #Put the JSON object into a normalized Pandas dataframe.
science_streams = streams.loc[streams['stream_type.value'].str.contains('Science')].reset_index(drop=True)
#Holder arrays
var_names = pd.DataFrame()
var_display = pd.DataFrame()
var_desc = pd.DataFrame()
var_standard = pd.DataFrame()
var_dpi = pd.DataFrame()
var_dl = pd.DataFrame()
var_dpt = pd.DataFrame()
var_units = pd.DataFrame()
var_id = pd.DataFrame()
#For each data variable in each stream.
for param in science_streams['parameters']:
d = pd.DataFrame(param).reset_index(drop=True)
#List of variables to drop.
var_drops = ['port_timestamp',
'driver_timestamp',
'internal_timestamp',
'preferred_timestamp',
'ingestion_timestamp',
'suspect_timestamp',
'date_time_string',
'oxy_calphase',
'input_voltage',
'voltage_out',
'date_of_sample',
'packet_type',
'serial_number',
'checksum',
'unique_id',
'firmware_version',
'record_length',
'sysconfig_frequency',
'sysconfig_beam_pattern',
'date_string',
'time_string',
'ext_volt0',
'meter_type',
'firmware_revision',
'instrument_id',
'record_type',
'record_time',
'voltage_battery',
'data_source_id',
'num_bytes',
'raw_signal_beta',
'raw_signal_chl',
'raw_signal_cdom',
'date_time_array',
'error_code',
'header_id',
'status',
'thermistor_raw',
'sysconfig_vertical_orientation',
'raw_time_seconds',
'raw_time_microseconds',
'suspect_timestamp',
'calibrated_phase',
'blue_phase',
'red_phase',
'temp_compensated_phase',
'blue_amplitude',
'red_amplitude',
'raw_temperature',
'error_vel_threshold',
'timer',
'thermistor_start',
'thermistor_end',
'reference_light_measurements',
'light_measurements',
'aux_fitting_1',
'aux_fitting_2',
'frame_header',
'frame_type',
'frame_counter',
'aux_fitting_3',
'rms_error',
'dcl_controller_timestamp',
'sample_time',
'temp_lamp',
'voltage_lamp',
'voltage_main',
'temp_interior',
'lamp_time',
'suspect_timestamp',
'thermistor_end',
'thermistor_start',
'time_of_sample',
'aux_fitting',
'date_of_sample',
'chl_volts',
'unique_id',
'record_time',
'light_measurements',
'thermistor_start',
'reference_light_measurements',
'battery_voltage',
'sensor_id',
'vin_sense',
'time_sync_flag',
'fixed_leader_id',
'sysconfig_sensor_config',
'num_data_types',
'va_sense',
'raw_internal_temp',
'phsen_battery_volts',
'humidity',
'sio_controller_timestamp',
'sysconfig_head_attached',
'sysconfig_vertical_orientation',
'data_flag',
'external_temp_raw',
'measurement_wavelength_beta',
'measurement_wavelength_chl',
'measurement_wavelength_cdom',
'raw_internal_temp',
'seawater_scattering_coefficient',
'total_volume_scattering_coefficient',
'port_number',
'product_number',
'internal_temperature',
'thermistor_raw',
'bit_result_demod_1',
'bit_result_demod_0',
'bit_result_timing',
'inductive_id',
'raw_internal_temp',
'start_dir',
'file_time',
'thermistor_raw',
'analog_input_2',
'analog_input_1',
'dosta_ln_optode_oxygen',
'oxy_temp_volts',
'voltage_analog',
'ref_channel_average',
'dosta_abcdjm_cspp_tc_oxygen',
'estimated_oxygen_concentration',
'ctd_tc_oxygen',
'par_val_v',
'analog1',
'absorbance_ratio',
'absolute_pressure',
'pressure_temp',
'water_velocity_east',
'ensemble_number',
'transducer_depth',
'error_seawater_velocity',
'corrected_echo',
'water_velocity_up',
'water_velocity_north',
'error_velocity',
'correlation_magnitude',
'echo_intensity',
'percent_good',
'percent_transforms_reject',
'percent_bad',
'non_zero_depth',
'depth_from_pressure',
'non_zero_pressure',
'bin_1_distance',
'cell_length',
'num_cells',
'ensemble_counter',
'amplitude_beam',
'correlation_beam',
'turbulent_velocity_east',
'turbulent_velocity_north',
'turbulent_velocity_vertical',
'abcdef_signal_intensity',
'internal_temp_raw',
'velocity_beam',
'temp_spectrometer',
'nutnr_nitrogen_in_nitrate',
'nutnr_absorbance_at',
'nutnr_bromide',
'nutnr_spectrum_average',
'spectral_channels',
'nutnr_dark_value_used',
'nutnr_integration',
'nutnr_voltage',
'nutnr_current',
'nutnr_fit',
'sample_delay',
'ref_channel_variance',
'sea_water_dark',
'spec_channel_average',
'phsen_thermistor_temperature',
'day_of_year',
'ctd_time_uint32',
'signal_intensity']
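# Join the drop list into a single regex and keep only the parameters whose names
# do not match any of the unwanted patterns.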
d = d.loc[~d['name'].str.contains('|'.join(var_drops))].reset_index(drop=True)
names = '|'.join(d['name'])
var_names = np.append(var_names,names)
display = '|'.join(d['display_name'])
var_display = np.append(var_display,display)
check = d.isna()
for i in range(len(check)):
if check['parameter_function_map'][i] == True:
d['parameter_function_map'][i] = 'NA'
if check['standard_name'][i] == True:
d['standard_name'][i] = 'NA'
if check['description'][i] == True:
d['description'][i] = 'NA'
if check['data_product_identifier'][i] == True:
d['data_product_identifier'][i] = 'NA'
if check['data_level'][i] == True:
d['data_level'][i] = 'NA'
if check['data_product_type'][i] == True:
d['data_product_type'][i] = 'NA'
desc = '|'.join(d['description'])
var_desc = np.append(var_desc,desc)
dpi = '|'.join(d['data_product_identifier'])
var_dpi = np.append(var_dpi,dpi)
dpt_df = pd.DataFrame()
for dpt in d['data_product_type']:
t = pd.DataFrame([dpt])
dpt_df = pd.concat([dpt_df, t])
import logging
import re
from datetime import datetime as dt
from datetime import timedelta as delta
import exchangelib as ex
import pandas as pd
from exchangelib import (DELEGATE, Account, Configuration, Credentials,
FaultTolerance)
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.config import AZURE_WEB
from smseventlog.utils import fileops as fl
from smseventlog.utils.credentials import CredentialManager
# silence exchangelib naive datetime on last_modified_time info log
logging.getLogger('exchangelib.fields').setLevel(logging.WARNING)
log = getlog(__name__)
class ExchangeAccount():
def __init__(self, gui=False, login=True):
_exch = None
_fldr_root, _wo_folder = None, None
cred_manager = CredentialManager(name='exchange', gui=gui)
f.set_self(vars(), exclude='login')
if login:
self.login()
@property
def exchange(self):
# exchangelib account object
if self._exch is None:
self._exch = self.create_account()
return self._exch
def login(self):
self._exch = self.create_account()
def create_config(self, credentials, m_config=None):
if m_config is None:
# failed once, use hardcoded vals
service_endpoint = 'https://outlook.office365.com/EWS/Exchange.asmx'
auth_type = 'basic'
version = None
else:
service_endpoint = m_config.get('ews_url', None)
auth_type = m_config.get('ews_auth_type', None)
version = m_config.get('ews_version', None)
config = Configuration(
retry_policy=FaultTolerance(max_wait=40),
credentials=credentials,
service_endpoint=service_endpoint,
auth_type=auth_type,
version=version)
return config
def create_account(self, failcount=0, config=None, autodiscover=None):
email, password = self.cred_manager.load()
credentials = Credentials(username=email, password=password)
# first try to load saved config from QSettings
keys = ('ews_url', 'ews_auth_type', 'ews_version')
m = self.cred_manager.load_multi(keys=keys)
# don't need to autodiscover if already have saved settings
if autodiscover is None:
autodiscover = True if m.get('ews_url', None) is None else False
if config is None:
config = self.create_config(credentials=credentials, m_config=m)
try:
account = Account(
primary_smtp_address=email,
config=config,
autodiscover=autodiscover,
access_type=DELEGATE) # important to be delegate, otherwise it tries 'Impersonate', which doesn't work
self.save_account_settings(account=account)
except:
log.warning(f'Failed creating account: {failcount}')
failcount += 1
if failcount == 1:
# on first fail, need to retry with manual credentials
config = self.create_config(credentials=credentials) # use hardcoded
account = self.create_account(failcount=failcount, config=config, autodiscover=False)
elif failcount <= 2:
account = self.create_account(failcount=failcount)
else:
return None
return account
def save_account_settings(self, account):
if AZURE_WEB:
return
m = dict(
ews_url=account.protocol.service_endpoint,
ews_auth_type=account.protocol.auth_type,
ews_version=account.version)
self.cred_manager.save_multi(vals=m)
@property
def fldr_root(self):
if self._fldr_root is None:
self._fldr_root = self.exchange.root / 'Top of Information Store'
return self._fldr_root
@property
def wo_folder(self):
if self._wo_folder is None:
self._wo_folder = self.fldr_root.glob('WO Request')
return self._wo_folder
def get_wo_from_email(self, unit, title):
tz = ex.EWSTimeZone.localzone()
maxdate = dt.now() + delta(days=-15)
messages = self.wo_folder \
.filter(
datetime_received__range=(
tz.localize(ex.EWSDateTime.from_datetime(maxdate)),
tz.localize(ex.EWSDateTime.now()))) \
.filter(subject__icontains=title) \
.filter(subject__icontains=unit)
expr = re.compile('WO[0-9]{7}', re.IGNORECASE)
for msg in messages.all():
match = re.search(expr, str(msg))
if match is not None:
wo = match.group(0)
return wo
def parse_attachment(attachment, d=None, header=2):
data = fl.from_bytes(attachment.content)
df = | pd.read_csv(data, header=header) | pandas.read_csv |
"""
assign cell identity based on SNR and UMI_min
"""
from celescope.__init__ import ROOT_PATH
from celescope.tools.step import Step, s_common
import celescope.tools.utils as utils
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import subprocess
import matplotlib
matplotlib.use('Agg')
def get_opts_count_tag(parser, sub_program):
parser.add_argument(
"--UMI_min",
help="Default='auto'. Minimum UMI threshold. Cell barcodes with valid UMI < UMI_min are classified as *undeterminded*.",
default="auto"
)
parser.add_argument(
"--dim",
help="Default=1. Tag dimentions. Usually we use 1-dimentional tag.",
default=1
)
parser.add_argument(
"--SNR_min",
help="""Default='auto'. Minimum signal-to-noise ratio.
Cell barcodes with UMI >=UMI_min and SNR < SNR_min are classified as *multiplet*. """,
default="auto"
)
parser.add_argument("--combine_cluster",
help="Conbine cluster tsv file.", default=None)
parser.add_argument(
"--coefficient",
help="""Default=0.1. If `SNR_min` is 'auto', minimum signal-to-noise ratio is calulated as
`SNR_min = max(median(SNRs) * coefficient, 2)`.
Smaller `coefficient` will cause less *multiplet* in the tag assignment.""",
default=0.1
)
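# Illustrative example of the formula above: with median(SNRs) == 30 and coefficient == 0.1,
# SNR_min = max(30 * 0.1, 2) = 3.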
if sub_program:
parser.add_argument("--read_count_file", help="Tag read count file.", required=True)
parser.add_argument("--match_dir", help="Match celescope scRNA-Seq directory.")
parser.add_argument("--matrix_dir", help="Match celescope scRNA-Seq matrix directory.")
parser.add_argument("--tsne_file", help="t-SNE coord file.")
s_common(parser)
def count_tag(args):
step_name = "count_tag"
runner = Count_tag(args, step_name)
runner.run()
class Count_tag(Step):
"""
Features
- Assign tag to each cell barcode and summarize.
Output
- `{sample}_umi_tag.tsv`
`first column` cell barcode
`last column` assigned tag
`columns between first and last` UMI count for each tag
- `{sample}_tsne_tag.tsv` same as `{sample}_umi_tag.tsv`, with t-SNE coordinates, gene counts and cluster information added
- `{sample}_cluster_count.tsv` number of cell barcodes assigned to *undetermined*, *multiplet* and *each tag*
"""
def __init__(self, args, step_name):
Step.__init__(self, args, step_name)
self.read_count_file = args.read_count_file
self.UMI_min = args.UMI_min
self.SNR_min = args.SNR_min
self.combine_cluster = args.combine_cluster
self.dim = int(args.dim)
self.coefficient = float(args.coefficient)
# read
self.df_read_count = pd.read_csv(self.read_count_file, sep="\t", index_col=0)
if args.match_dir:
match_dict = utils.parse_match_dir(args.match_dir)
self.match_barcode = match_dict['match_barcode']
self.cell_total = match_dict['cell_total']
self.tsne_file = match_dict['tsne_coord']
self.matrix_dir = match_dict['matrix_dir']
elif args.matrix_dir:
df_barcode = pd.read_csv(f'{args.matrix_dir}/barcodes.tsv', header=None)
self.match_barcode = df_barcode[0].tolist()
self.cell_total = len(self.match_barcode)
self.tsne_file = args.tsne_file
self.matrix_dir = args.matrix_dir
else:
raise ValueError("--match_dir or --matrix_dir is required.")
# init
self.no_noise = False
# out files
self.UMI_tag_file = f'{self.outdir}/{self.sample}_umi_tag.tsv'
self.tsne_tag_file = f'{self.outdir}/{self.sample}_tsne_tag.tsv'
self.cluster_count_file = f'{self.outdir}/{self.sample}_cluster_count.tsv'
self.cluster_plot = f'{self.outdir}/{self.sample}_cluster_plot.pdf'
if self.combine_cluster:
self.combine_cluster_count_file = f'{self.outdir}/{self.sample}_combine_cluster_count.tsv'
self.combine_cluster_plot = f'{self.outdir}/{self.sample}_combine_cluster_plot.pdf'
@staticmethod
def get_UMI(row):
return row.sum()
@staticmethod
def get_UMI_min(df_cell_UMI, UMI_min):
if UMI_min == "auto":
UMI_min1 = np.percentile(df_cell_UMI.sum(axis=1), 5)
UMI_min2 = np.median(df_cell_UMI.sum(axis=1)) / 10
UMI_min = int(min(UMI_min1, UMI_min2))
UMI_min = max(UMI_min, 1)
return UMI_min
else:
return int(UMI_min)
@staticmethod
def get_SNR(row, dim):
row_sorted = sorted(row, reverse=True)
noise = row_sorted[dim]
signal = row_sorted[dim - 1]
if signal == 0:
return 0
if noise == 0:
return np.inf
return float(signal) / noise
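# Worked example: for UMI counts [10, 2, 1] and dim=1, signal=10 and noise=2, so SNR=5.0.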
@utils.add_log
def get_SNR_min(self, df_cell_UMI, SNR_min, UMI_min):
UMIs = df_cell_UMI.apply(Count_tag.get_UMI, axis=1)
df_valid_cell_UMI = df_cell_UMI[UMIs >= UMI_min]
if SNR_min == "auto":
# no noise
if df_valid_cell_UMI.shape[1] <= self.dim:
Count_tag.get_SNR_min.logger.warning('*** No NOISE FOUND! ***')
self.no_noise = True
return 0
SNRs = df_valid_cell_UMI.apply(Count_tag.get_SNR, dim=self.dim, axis=1)
if np.median(SNRs) == np.inf:
return 10
return max(np.median(SNRs) * self.coefficient, 2)
else:
return float(SNR_min)
@staticmethod
def tag_type(row, UMI_min, SNR_min, dim, no_noise=False):
if no_noise:
SNR = 1
else:
SNR = Count_tag.get_SNR(row, dim)
UMI = Count_tag.get_UMI(row)
if UMI < UMI_min:
return "Undetermined"
if SNR < SNR_min:
return "Multiplet"
# get tag
signal_tags = sorted(row.sort_values(ascending=False).index[0:dim])
signal_tags_str = "_".join(signal_tags)
return signal_tags_str
def write_and_plot(self, df, column_name, count_file, plot_file):
df_count = df.groupby(["tag", column_name]).size().unstack()
df_count.fillna(0, inplace=True)
df_count.to_csv(count_file, sep="\t")
df_percent = df_count / df_count.sum()
df_plot = df_percent.stack().reset_index()
df_plot.rename({0: "percent"}, axis=1, inplace=True)
# plot
colors = list(matplotlib.colors.cnames.keys())
fig, ax = plt.subplots(figsize=(20, 10))
types = df_plot["tag"].drop_duplicates()
margin_bottom = np.zeros(len(df_plot[column_name].drop_duplicates()))
for num, tag_type in enumerate(types):
values = list(df_plot.loc[df_plot["tag"] == tag_type, "percent"])
df_plot[df_plot['tag'] == tag_type].plot.bar(
x=column_name, y='percent', ax=ax, stacked=True,
bottom=margin_bottom, label=tag_type, color=colors[num * 3 + 1])
margin_bottom += values
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title("tag fraction")
fig.savefig(plot_file)
@utils.add_log
def run(self):
mapped_read = self.df_read_count['read_count'].sum()
# in cell
df_read_count_in_cell = self.df_read_count[self.df_read_count.index.isin(self.match_barcode)]
mapped_read_in_cell = int(df_read_count_in_cell['read_count'].sum())
self.add_metric(
name='Mapped Reads in Cells',
value=mapped_read_in_cell,
total=mapped_read,
)
# UMI
tag_name = df_read_count_in_cell.columns[0]
df_UMI_in_cell = df_read_count_in_cell.reset_index().groupby([
'barcode', tag_name]).agg({'UMI': 'count'})
df_UMI_in_cell = df_UMI_in_cell.reset_index()
df_UMI_in_cell = df_UMI_in_cell.pivot(
index='barcode', columns=tag_name, values='UMI')
df_cell = pd.DataFrame(index=self.match_barcode)
df_UMI_cell = pd.merge(
df_cell,
df_UMI_in_cell,
how="left",
left_index=True,
right_index=True
)
# fillna
df_UMI_cell.fillna(0, inplace=True)
df_UMI_cell = df_UMI_cell.astype(int)
# UMI
UMIs = df_UMI_cell.apply(sum, axis=1)
umi_median = round(np.median(UMIs), 2)
umi_mean = round(np.mean(UMIs), 2)
self.add_metric(
name='Median UMI per Cell',
value=umi_median,
)
self.add_metric(
name='Mean UMI per Cell',
value=umi_mean,
)
UMI_min = Count_tag.get_UMI_min(df_UMI_cell, self.UMI_min)
Count_tag.run.logger.info(f'UMI_min: {UMI_min}')
SNR_min = self.get_SNR_min(df_UMI_cell, self.SNR_min, UMI_min)
Count_tag.run.logger.info(f'SNR_min: {SNR_min}')
df_UMI_cell["tag"] = df_UMI_cell.apply(
Count_tag.tag_type, UMI_min=UMI_min, SNR_min=SNR_min, dim=self.dim, no_noise=self.no_noise, axis=1)
df_UMI_cell.to_csv(self.UMI_tag_file, sep="\t")
df_tsne = | pd.read_csv(self.tsne_file, sep="\t", index_col=0) | pandas.read_csv |
import load
import tokenizer
import pickle
import numpy as np
from collections import Counter
import pandas
import os
tags = ["eou", "eot"]
word_counts_path = "dumps/word_counts.pkl"
word_indices_parth = "dumps/word_indices.pkl"
_unk = "<UNK>"
_pad = "<PAD>"
def construct_indices_from_count():
"""Convert the dictionary of word counts into a dictionary of word indices"""
with open(word_counts_path, "rb") as f:
counts = pickle.load(f)
vocab = list(counts.keys())
# Account for padding and unknown words
vocab = [_pad, _unk] + vocab
word_indices = dict(zip(vocab, range(len(vocab))))
with open(word_indices_parth, "wb") as f:
pickle.dump(word_indices, f)
def reconstruct_tags(sentences):
"""Tags in the form __tag__ are being tokenize into 3 tokens.
We don't want that to happen, so we put them back together"""
new_sents = []
for sentence in sentences:
temp_sent = np.array(sentence)
to_remove = []
for tag in tags:
indices = np.argwhere(temp_sent == tag).flatten()
for i in indices:
if temp_sent[i-1] == "__" and temp_sent[i+1] == "__":
to_remove.extend([i-1, i+1])
temp_sent[i] = "__" + tag + "__"
new_sents.append(np.delete(temp_sent, to_remove).tolist())
return new_sents
def merge_back_test_array(context, true, distractors):
res = []
for i in range(len(context)):
row = []
row.append(context[i])
row.append(true[i])
for k in range(len(distractors)):
row.append(distractors[k][i])
res.append(row)
return res
def merge_back_train_array(context, hypothesis, value):
# value is a numpy array; index it element-wise when building each row
res = []
for i in range(len(context)):
row = []
row.append(context[i])
row.append(hypothesis[i])
row.append(value[i])
res.append(row)
return res
def split_training_dataset(file, nb_splits, output_format_file):
# Output format file is expected to be in the form "filename_{}.csv", where the brackets will be replaced by the split number
train = load.load_csv(file)
subtrains = np.split(train, nb_splits, 0)
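# note: np.split raises a ValueError unless len(train) is evenly divisible by nb_splits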
for i in range(len(subtrains)):
df = | pandas.DataFrame(subtrains[i]) | pandas.DataFrame |
import numpy as np
import imageio
import os
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
from brainio_base.stimuli import StimulusSet
class Stimulus:
def __init__(self, size_px=[448, 448], bit_depth=8,
stim_id=1000, save_dir='images', type_name='stimulus',
format_id='{0:04d}'):
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
self.type_name = type_name
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
self.size_px = size_px
self.objects = []
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def add_object(self, stim_object):
self.objects.append(stim_object)
def build_stimulus(self):
for obj in self.objects:
self.stimulus[obj.mask] = obj.stimulus[obj.mask]
def clear_stimulus(self):
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def show_stimulus(self):
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
file_name= self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
class Grating:
def __init__(self, orientation=0, phase=0, sf=2, size_px=[448, 448], width=8,
contrast=1, bit_depth=8, pos=[0, 0], rad=5, sig=0,
stim_id=1000, format_id='{0:04d}', save_dir='images', type_name='grating'):
# save directory
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
# label for type of stimulus
self.type_name = type_name
# 1 channel colors, white, black, grey
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
# pixel dimensions of the image
self.size_px = np.array(size_px)
# position of image in field of view
self.pos = np.array(pos)
# pixel to visual field degree conversion
self.px_to_deg = self.size_px[1] / width
# size of stimulus in visual field in degrees
self.size = self.size_px / self.px_to_deg
# orientation in radians
self.orientation = orientation / 180 * np.pi
# phase of the grating
self.phase = phase / 180 * np.pi
# spatial frequency of the grating
self.sf = sf
# contrast of the grating
self.contrast = contrast
# make self.xv and self.yv store the degree positions of all pixels in the image
self.xv = np.zeros(size_px)
self.yv = np.zeros(size_px)
self.update_frame()
self.mask = np.ones(size_px, dtype=bool)
self.set_circ_mask(rad=rad)
self.tex = np.zeros(size_px)
self.stimulus = np.ones(size_px, dtype=np.uint8) * self.gray
self.envelope = np.ones(size_px)
if sig == 0:
self.update_tex()
else:
self.set_gaussian_envelope(sig)
def update_frame(self):
x = (np.arange(self.size_px[1]) - self.size_px[1]/2) / self.px_to_deg - self.pos[1]
y = (np.arange(self.size_px[0]) - self.size_px[0]/2) / self.px_to_deg - self.pos[0]
# all possible degree coordinates in matrices of points
self.xv, self.yv = np.meshgrid(x, y)
def update_tex(self):
# make the grating pattern
self.tex = (np.sin((self.xv * np.cos(self.orientation) + self.yv * np.sin(self.orientation)) *
self.sf * 2 * np.pi + self.phase) * self.contrast * self.envelope)
def update_stimulus(self):
self.stimulus[self.mask] = np.uint8(((self.tex[self.mask]+1)/2)*self.white)
self.stimulus[np.logical_not(self.mask)] = self.gray
def set_circ_mask(self, rad):
# apply operation to put a 1 for all points inclusively within the degree radius and a 0 outside it
self.mask = self.xv**2 + self.yv**2 <= rad ** 2
# same as circular mask but for an annulus
def set_annular_mask(self, inner_rad, outer_rad):
self.mask = (self.xv ** 2 + self.yv ** 2 <= outer_rad ** 2) * \
(self.xv ** 2 + self.yv ** 2 > inner_rad ** 2)
def set_gaussian_envelope(self, sig):
d = np.sqrt(self.xv**2 + self.yv**2)
self.envelope = np.exp(-d**2/(2 * sig**2))
self.update_tex()
def show_stimulus(self):
# pyplot stuff
self.update_stimulus()
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
# save to correct (previously specified) directory
self.update_stimulus()
file_name = self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
def load_stim_info(stim_name, data_dir):
stim = pd.read_csv(os.path.join(data_dir, 'stimulus_set'), dtype={'image_id': str})
image_paths = dict((key, value) for (key, value) in zip(stim['image_id'].values,
[os.path.join(data_dir, image_name) for image_name
in stim['image_file_name'].values]))
stim_set = StimulusSet(stim[stim.columns[:-1]])
stim_set.image_paths = image_paths
stim_set.identifier = stim_name
return stim_set
def gen_blank_stim(degrees, size_px, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
stim = Stimulus(size_px=[size_px, size_px], type_name='blank_stim', save_dir=save_dir, stim_id=0)
stimuli = pd.DataFrame({'image_id': str(0), 'degrees': [degrees]})
image_names = (stim.save_stimulus())
stimuli['image_file_name'] = pd.Series(image_names)
stimuli['image_current_local_file_path'] = pd.Series(save_dir + os.sep + image_names)
stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)
def gen_grating_stim(degrees, size_px, stim_name, grat_params, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
width = degrees
nStim = grat_params.shape[0]
print('Generating stimulus: #', nStim)
stimuli = pd.DataFrame({'image_id': [str(n) for n in range(nStim)], 'degrees': [width] * nStim})
image_names = nStim * [None]
image_local_file_path = nStim * [None]
all_y = nStim * [None]
all_x = nStim * [None]
all_c = nStim * [None]
all_r = nStim * [None]
all_s = nStim * [None]
all_o = nStim * [None]
all_p = nStim * [None]
for i in np.arange(nStim):
stim_id = np.uint64(grat_params[i, 0] * 10e9 + grat_params[i, 1] * 10e7 + grat_params[i, 3] * 10e5 +
grat_params[i, 4] * 10e3 + grat_params[i, 5] * 10e1 + grat_params[i, 6])
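# the grating parameters (position, radius, spatial frequency, orientation, phase) are packed into one roughly unique integer id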
grat = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
rad=grat_params[i, 3], sf=grat_params[i, 4], orientation=grat_params[i, 5],
phase=grat_params[i, 6], stim_id= stim_id, format_id='{0:012d}', save_dir=save_dir,
size_px=[size_px, size_px], type_name=stim_name)
image_names[i] = (grat.save_stimulus())
image_local_file_path[i] = save_dir + os.sep + image_names[i]
all_y[i] = grat_params[i, 0]
all_x[i] = grat_params[i, 1]
all_c[i] = grat_params[i, 2]
all_r[i] = grat_params[i, 3]
all_s[i] = grat_params[i, 4]
all_o[i] = grat_params[i, 5]
all_p[i] = grat_params[i, 6]
stimuli['position_y'] = pd.Series(all_y)
stimuli['position_x'] = pd.Series(all_x)
stimuli['contrast'] = pd.Series(all_c)
stimuli['radius'] = pd.Series(all_r)
stimuli['spatial_frequency'] = pd.Series(all_s)
stimuli['orientation'] = pd.Series(all_o)
stimuli['phase'] = | pd.Series(all_p) | pandas.Series |
__version__ = 'v1'
__author__ = 'Vizerfur'
__function__ = ['del_unique_col','del_none_col','find_mul_class_col','translate',
'none_values_description','one_hot_encoder','data_info_desc']
__last_edit_time__ = '2/23/2020'
import numpy
import random
import re
import pandas
import SDV.support
# 1
def del_unique_col(df):
"""delete columns those values are just single one."""
l = []
for each in df:
if len(df[each].value_counts()) == 1:
l.append(each)
df.drop(l,axis = 1,inplace = True)
if len(l) > 0:
print(f"Deleted {len(l)} features below:")
return l
else:
print('No qualified feature to delete')
# 2
def del_none_col(df,threshold = 0.5):
"""delete those columns whose none values number is bigger than threshold."""
l = []
for each in df:
if df[each].isnull().sum()/len(df[each]) > threshold:
l.append(each)
df.drop(l,axis = 1,inplace = True)
if len(l) > 0:
print(f"Deleted {len(l)} features below:")
return l
else:
print('No qualified feature to delete')
# 3
def find_mul_class_col(df,threshold_of_category_num = 100,rand_num = 500):
"""
A function that singles out object features containing a large number of categories.
It also analyses the content of each such feature using regular expressions,
dividing the characters into three types: number, alphabet and others.
The function then returns a dataframe with 5 columns; the data-structure columns are expressed as percentages.
----------
args:
threshold_of_category_num(int type, default = 100): category-count threshold. Only features with more categories than this are analysed; the rest are skipped.
rand_num(int type, default = 500): number of rows sampled at random from the whole dataset.
"""
f = []
f_m = []
d_s = []
length = len(df)
for each in df.select_dtypes(include = 'object').columns:
if len(df[each].value_counts()) > threshold_of_category_num:
f.append(each)
f_m.append(len(df[each].value_counts()))
random_list = [random.randint(0,length) for i in range(rand_num)] # select random numbers
s = ''
for ii in random_list:
try:
string_ = str(df.iloc[ii][each]) # sometimes raises an error like 'out of range'.
except:
pass
if string_ != 'nan': # str(nan) = 'nan', so we should filter out this negative influence.
try:
s += string_.replace(' ','') # delete blanks.
except:
pass
if len(s) == 0:
d_s.append({'Number':0.0,'Alphabet':0.0,'Others':0.0})
else:
d_s.append(SDV.support.data_struc(s)) # data_struc() return a dict.
return pandas.DataFrame(data ={'Object_features':f,
'Category_counts':f_m,
'Data_structure_number':[each['Number'] for each in d_s],
'Data_structure_alphebat':[each['Alphabet'] for each in d_s],
'Data_structure_others':[each['Others'] for each in d_s]})
# 4
def translate(t):
"""translate. (English to Chinese)"""
if type(t) == pandas.core.frame.DataFrame:
return pandas.DataFrame(data = {'Feature_name':t.columns,
'Translation':[SDV.support.translate(each) for each in t.columns]})
if type(t) == str:
return SDV.support.translate(t)
# 5
def none_values_description(df,filter_ = False):
"""Count np.nan type values of dataframe and return by percentage.
--------
filter_: If filter_ is True, the returned dataframe will filter the full-value features.
"""
fea_col = df.columns.tolist()
length = len(df)
null_num = [df[each].isnull().sum() for each in fea_col]
null_penc = [i/length for i in null_num]
return_ = pandas.DataFrame({'Feature_name':fea_col,
'None_values_counts':null_num,
'None_values_ratio':null_penc}).sort_values('None_values_ratio',ascending = False)
return_.index = list(range(len(return_)))
if filter_ is False:
return return_
if filter_ is True:
return return_[return_['None_values_ratio'] > 0]
# 6
def one_hot_encoder(df,encode_list = []):
"""return a processed dataframe.If encode_list(default blank list) is not appointed, the function will processing all the object features of input datafrme. If not, then just proceesing the encode_list givend."""
if len(encode_list) == 0:
ojb_fea = df.select_dtypes(include = 'object')
for each in ojb_fea:
oh = pandas.get_dummies(df[each],prefix = each)
df = | pandas.concat([df,oh],axis = 1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/11/17 22:11
describe: helper utilities for running CzscAdvancedTrader with the GoldMiner (gm) quant API
"""
import os
import dill
import inspect
import czsc
import traceback
import pandas as pd
from gm.api import *
from datetime import datetime, timedelta, timezone
from collections import OrderedDict
from typing import List, Callable
from czsc.traders import CzscAdvancedTrader
from czsc.utils import qywx as wx
from czsc.utils.bar_generator import BarGenerator
from czsc.utils.log import create_logger
from czsc.objects import RawBar, Event, Freq, Operate, PositionLong, PositionShort
from czsc.signals.signals import get_default_signals
dt_fmt = "%Y-%m-%d %H:%M:%S"
date_fmt = "%Y-%m-%d"
assert czsc.__version__ >= "0.8.13"
def set_gm_token(token):
with open(os.path.join(os.path.expanduser("~"), "gm_token.txt"), 'w', encoding='utf-8') as f:
f.write(token)
file_token = os.path.join(os.path.expanduser("~"), "gm_token.txt")
if not os.path.exists(file_token):
print("{} ๆไปถไธๅญๅจ๏ผ่ฏทๅ็ฌๅฏๅจไธไธช python ็ป็ซฏ๏ผ่ฐ็จ set_gm_token ๆนๆณๅๅปบ่ฏฅๆไปถ๏ผๅ้ๆฐๆง่กใ".format(file_token))
else:
gm_token = open(file_token, encoding="utf-8").read()
set_token(gm_token)
freq_gm2cn = {"60s": "1ๅ้", "300s": "5ๅ้", "900s": "15ๅ้",
"1800s": "30ๅ้", "3600s": "60ๅ้", "1d": "ๆฅ็บฟ"}
freq_cn2gm = {v: k for k, v in freq_gm2cn.items()}
indices = {
"ไธ่ฏๆๆฐ": 'SHSE.000001',
"ไธ่ฏ50": 'SHSE.000016',
"ๆฒชๆทฑ300": "SHSE.000300",
"ไธญ่ฏ1000": "SHSE.000852",
"ๆทฑ่ฏๆๆ": "SZSE.399001",
"ๅไธๆฟๆๆฐ": 'SZSE.399006',
"ๆทฑๆฌกๆฐ่ก": "SZSE.399678",
"ไธญๅฐๆฟๆ": "SZSE.399005",
"ไธญ่ฏ500": "SZSE.399905",
"ๅฝ่ฏ2000": "SZSE.399303",
"ๅฐ็ๆ้ฟ": "SZSE.399376",
"ๅฐ็ไปทๅผ": "SZSE.399377",
}
def is_trade_date(dt):
"""ๅคๆญ dt ๆถๅปๆฏไธๆฏไบคๆๆฅๆ"""
dt = pd.to_datetime(dt)
date_ = dt.strftime("%Y-%m-%d")
trade_dates = get_trading_dates(exchange='SZSE', start_date=date_, end_date=date_)
if trade_dates:
return True
else:
return False
def is_trade_time(dt):
"""ๅคๆญ dt ๆถๅปๆฏไธๆฏไบคๆๆถ้ด"""
dt = pd.to_datetime(dt)
date_ = dt.strftime("%Y-%m-%d")
trade_dates = get_trading_dates(exchange='SZSE', start_date=date_, end_date=date_)
if trade_dates and "15:00" > dt.strftime("%H:%M") > "09:30":
return True
else:
return False
def get_stocks():
"""่ทๅ่ก็ฅจๅธๅบๆ ็ๅ่กจ๏ผๅ
ๆฌ่ก็ฅจใๆๆฐ็ญ"""
df = get_instruments(exchanges='SZSE,SHSE', fields="symbol,sec_name", df=True)
shares = {row['symbol']: row['sec_name'] for _, row in df.iterrows()}
return shares
def get_index_shares(name, end_date=None):
"""่ทๅๆไธไบคๆๆฅ็ๆๆฐๆๅ่กๅ่กจ
symbols = get_index_shares("ไธ่ฏ50", "2019-01-01 09:30:00")
"""
if not end_date:
end_date = datetime.now().strftime(date_fmt)
else:
end_date = pd.to_datetime(end_date).strftime(date_fmt)
constituents = get_history_constituents(indices[name], end_date, end_date)[0]
symbol_list = [k for k, v in constituents['constituents'].items()]
return list(set(symbol_list))
def format_kline(df, freq: Freq):
bars = []
for i, row in df.iterrows():
bar = RawBar(symbol=row['symbol'], id=i, freq=freq, dt=row['eob'], open=round(row['open'], 2),
close=round(row['close'], 2), high=round(row['high'], 2),
low=round(row['low'], 2), vol=row['volume'], amount=row['amount'])
bars.append(bar)
return bars
def get_kline(symbol, end_time, freq='60s', count=33000, adjust=ADJUST_PREV):
"""่ทๅK็บฟๆฐๆฎ
:param symbol: ๆ ็ไปฃ็
:param end_time: ็ปๆๆถ้ด
:param freq: K็บฟๅจๆ
:param count: K็บฟๆฐ้
:param adjust: ๅคๆๆนๅผ
:return:
"""
if isinstance(end_time, datetime):
end_time = end_time.strftime(dt_fmt)
exchange = symbol.split(".")[0]
freq_map_ = {'60s': Freq.F1, '300s': Freq.F5, '900s': Freq.F15, '1800s': Freq.F30,
'3600s': Freq.F60, '1d': Freq.D}
if exchange in ["SZSE", "SHSE"]:
df = history_n(symbol=symbol, frequency=freq, end_time=end_time, adjust=adjust,
fields='symbol,eob,open,close,high,low,volume,amount', count=count, df=True)
else:
df = history_n(symbol=symbol, frequency=freq, end_time=end_time, adjust=adjust,
fields='symbol,eob,open,close,high,low,volume,amount,position', count=count, df=True)
return format_kline(df, freq_map_[freq])
def get_init_bg(symbol: str,
end_dt: [str, datetime],
base_freq: str,
freqs: List[str],
max_count=1000,
adjust=ADJUST_PREV):
"""่ทๅ symbol ็ๅๅงๅ bar generator"""
if isinstance(end_dt, str):
end_dt = pd.to_datetime(end_dt, utc=True)
end_dt = end_dt.tz_convert('dateutil/PRC')
# after the timezone conversion, subtract 8 hours to get the intended time
end_dt = end_dt - timedelta(hours=8)
else:
assert end_dt.tzinfo._filename == 'PRC'
last_day = (end_dt - timedelta(days=10)).replace(hour=16, minute=0)
bg = BarGenerator(base_freq, freqs, max_count)
if "ๅจ็บฟ" in freqs or "ๆ็บฟ" in freqs:
d_bars = get_kline(symbol=symbol, end_time=last_day, freq=freq_cn2gm["日线"], count=5000, adjust=adjust)
bgd = BarGenerator("日线", ['周线', '月线', '季线', '年线'])
for b in d_bars:
bgd.update(b)
else:
bgd = None
for freq in bg.bars.keys():
if freq in ['周线', '月线', '季线', '年线']:
bars_ = bgd.bars[freq]
else:
bars_ = get_kline(symbol=symbol, end_time=last_day, freq=freq_cn2gm[freq], count=max_count, adjust=adjust)
bg.bars[freq] = bars_
print(f"{symbol} - {freq} - {len(bg.bars[freq])} - last_dt: {bg.bars[freq][-1].dt} - last_day: {last_day}")
bars2 = get_kline(symbol=symbol, end_time=end_dt, freq=freq_cn2gm[base_freq],
count=int(240 / int(base_freq.strip('分钟'))*10))
data = [x for x in bars2 if x.dt > last_day]
assert len(data) > 0
print(f"{symbol}: bar generator ๆๆฐๆถ้ด {bg.bars[base_freq][-1].dt.strftime(dt_fmt)}๏ผ่ฟๆ{len(data)}่กๆฐๆฎ้่ฆupdate")
return bg, data
order_side_map = {OrderSide_Unknown: 'other', OrderSide_Buy: 'buy', OrderSide_Sell: 'sell'}
order_status_map = {
OrderStatus_Unknown: "other",
OrderStatus_New: "submitted",
OrderStatus_PartiallyFilled: "partially filled",
OrderStatus_Filled: "filled",
OrderStatus_Canceled: "canceled",
OrderStatus_PendingCancel: "pending cancel",
OrderStatus_Rejected: "rejected",
OrderStatus_Suspended: "suspended (invalid)",
OrderStatus_PendingNew: "pending submit",
OrderStatus_Expired: "expired",
}
pos_side_map = {PositionSide_Unknown: 'other', PositionSide_Long: 'long', PositionSide_Short: 'short'}
pos_effect_map = {
PositionEffect_Unknown: 'other',
PositionEffect_Open: 'open',
PositionEffect_Close: 'close',
PositionEffect_CloseToday: 'close today',
PositionEffect_CloseYesterday: 'close yesterday',
}
exec_type_map = {
ExecType_Unknown: "other",
ExecType_New: "submitted",
ExecType_Canceled: "canceled",
ExecType_PendingCancel: "pending cancel",
ExecType_Rejected: "rejected",
ExecType_Suspended: "suspended",
ExecType_PendingNew: "pending submit",
ExecType_Expired: "expired",
ExecType_Trade: "trade (valid)",
ExecType_OrderStatus: "order status",
ExecType_CancelRejected: "cancel rejected (valid)",
}
def on_order_status(context, order):
"""
https://www.myquant.cn/docs/python/python_object_trade#007ae8f5c7ec5298
:param context:
:param order:
:return:
"""
if not is_trade_time(context.now):
return
symbol = order.symbol
latest_dt = context.now.strftime("%Y-%m-%d %H:%M:%S")
logger = context.logger
if symbol not in context.symbols_info.keys():
msg = f"่ฎขๅ็ถๆๆดๆฐ้็ฅ๏ผ\n{'*' * 31}\n" \
f"ๆดๆฐๆถ้ด๏ผ{latest_dt}\n" \
f"ๆ ็ๅ็งฐ๏ผ{symbol} {context.stocks.get(symbol, 'ๆ ๅ')}\n" \
f"ๆไฝ็ฑปๅ๏ผ{order_side_map[order.side]}{pos_effect_map[order.position_effect]}\n" \
f"ๆไฝๆ่ฟฐ๏ผ้ๆบๅจไบคๆๆ ็\n" \
f"ไธๅไปทๆ ผ๏ผ{round(order.price, 2)}\n" \
f"ๆๆฐ็ถๆ๏ผ{order_status_map[order.status]}\n" \
f"ๅงๆ๏ผ่ก๏ผ๏ผ{int(order.volume)}\n" \
f"ๅทฒๆ๏ผ่ก๏ผ๏ผ{int(order.filled_volume)}\n" \
f"ๅไปท๏ผๅ
๏ผ๏ผ{round(order.filled_vwap, 2)}"
else:
trader: GmCzscTrader = context.symbols_info[symbol]['trader']
if trader.long_pos.operates:
last_op_desc = trader.long_pos.operates[-1]['op_desc']
else:
last_op_desc = ""
msg = f"่ฎขๅ็ถๆๆดๆฐ้็ฅ๏ผ\n{'*' * 31}\n" \
f"ๆดๆฐๆถ้ด๏ผ{latest_dt}\n" \
f"ๆ ็ๅ็งฐ๏ผ{symbol} {context.stocks.get(symbol, 'ๆ ๅ')}\n" \
f"ๆไฝ็ฑปๅ๏ผ{order_side_map[order.side]}{pos_effect_map[order.position_effect]}\n" \
f"ๆไฝๆ่ฟฐ๏ผ{last_op_desc}\n" \
f"ไธๅไปทๆ ผ๏ผ{round(order.price, 2)}\n" \
f"ๆๆฐ็ถๆ๏ผ{order_status_map[order.status]}\n" \
f"ๅงๆ๏ผ่ก๏ผ๏ผ{int(order.volume)}\n" \
f"ๅทฒๆ๏ผ่ก๏ผ๏ผ{int(order.filled_volume)}\n" \
f"ๅไปท๏ผๅ
๏ผ๏ผ{round(order.filled_vwap, 2)}"
logger.info(msg.replace("\n", " - ").replace('*', ""))
if context.mode != MODE_BACKTEST and order.status in [1, 3, 5, 8, 9, 12]:
wx.push_text(content=str(msg), key=context.wx_key)
def on_execution_report(context, execrpt):
"""ๅๅบๅงๆ่ขซๆง่กไบไปถ๏ผๅงๆๆไบคๆ่
ๆคๅๆ็ปๅ่ขซ่งฆๅใ
https://www.myquant.cn/docs/python/python_trade_event#on_execution_report%20-%20%E5%A7%94%E6%89%98%E6%89%A7%E8%A1%8C%E5%9B%9E%E6%8A%A5%E4%BA%8B%E4%BB%B6
https://www.myquant.cn/docs/python/python_object_trade#ExecRpt%20-%20%E5%9B%9E%E6%8A%A5%E5%AF%B9%E8%B1%A1
:param context:
:param execrpt:
:return:
"""
if not is_trade_time(context.now):
return
latest_dt = context.now.strftime(dt_fmt)
logger = context.logger
msg = f"ๅงๆ่ฎขๅ่ขซๆง่ก้็ฅ๏ผ\n{'*' * 31}\n" \
f"ๆถ้ด๏ผ{latest_dt}\n" \
f"ๆ ็๏ผ{execrpt.symbol}\n" \
f"ๅ็งฐ๏ผ{context.stocks.get(execrpt.symbol, 'ๆ ๅ')}\n" \
f"ๆนๅ๏ผ{order_side_map[execrpt.side]}{pos_effect_map[execrpt.position_effect]}\n" \
f"ๆไบค้๏ผ{int(execrpt.volume)}\n" \
f"ๆไบคไปท๏ผ{round(execrpt.price, 2)}\n" \
f"ๆง่กๅๆฅ็ฑปๅ๏ผ{exec_type_map[execrpt.exec_type]}"
logger.info(msg.replace("\n", " - ").replace('*', ""))
if context.mode != MODE_BACKTEST and execrpt.exec_type in [1, 5, 6, 8, 12, 19]:
wx.push_text(content=str(msg), key=context.wx_key)
def on_backtest_finished(context, indicator):
"""ๅๆต็ปๆๅ่ฐๅฝๆฐ
:param context:
:param indicator:
https://www.myquant.cn/docs/python/python_object_trade#bd7f5adf22081af5
:return:
"""
wx_key = context.wx_key
symbols = context.symbols
data_path = context.data_path
logger = context.logger
logger.info(str(indicator))
logger.info("ๅๆต็ปๆ ... ")
cash = context.account().cash
for k, v in indicator.items():
if isinstance(v, float):
indicator[k] = round(v, 4)
row = OrderedDict({
"็ ็ฉถๆ ็": ", ".join(list(context.symbols_info.keys())),
"ๅๆตๅผๅงๆถ้ด": context.backtest_start_time,
"ๅๆต็ปๆๆถ้ด": context.backtest_end_time,
"็ดฏ่ฎกๆถ็็": indicator['pnl_ratio'],
"ๆๅคงๅๆค": indicator['max_drawdown'],
"ๅนดๅๆถ็็": indicator['pnl_ratio_annual'],
"ๅคๆฎๆฏ็": indicator['sharp_ratio'],
"็ๅฉๆฌกๆฐ": indicator['win_count'],
"ไบๆๆฌกๆฐ": indicator['lose_count'],
"ไบคๆ่็": indicator['win_ratio'],
"็ดฏ่ฎกๅบๅ
ฅ้": int(cash['cum_inout']),
"็ดฏ่ฎกไบคๆ้ข": int(cash['cum_trade']),
"็ดฏ่ฎกๆ็ปญ่ดน": int(cash['cum_commission']),
"็ดฏ่ฎกๅนณไปๆถ็": int(cash['cum_pnl']),
"ๅๆถ็": int(cash['pnl']),
})
sdt = pd.to_datetime(context.backtest_start_time).strftime('%Y%m%d')
edt = | pd.to_datetime(context.backtest_end_time) | pandas.to_datetime |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if out dtype is np.complex64_, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, scalar/array belongs to pandas extension types is inferred as
object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, scalar belongs to pandas extension types is inferred as
object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif | lib.is_interval(val) | pandas._libs.lib.is_interval |
import unittest
import pandas as pd
import numpy as np
from econ_watcher_reader.reader import EconomyWatcherReader
import logging
logging.basicConfig()
logging.getLogger("econ_watcher_reader.reader").setLevel(level=logging.DEBUG)
class TestReaderCurrent(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kind_ = 'current'
# ----------------
# normal scenarios
# ----------------
def test_getting_data_for_one_month(self):
reader = EconomyWatcherReader()
data = reader.get_data(self.kind_, pd.datetime(2015,10,1), None)
# check column names
self.assertSetEqual(set(data.columns),
{'date', 'reason_type', 'industry', 'region', 'is_tokyo', 'field', 'score', 'reason_sentence'})
def test_getting_data_for_multiple_months(self):
reader = EconomyWatcherReader()
data = reader.get_data('current', pd.datetime(2018, 1, 1), pd.datetime(2018,5,1))
# check data range
self.assertListEqual(
list(pd.date_range(pd.datetime(2018, 1, 1), pd.datetime(2018,5,1), freq='MS').values),
list(np.sort(data.date.unique()))
)
def test_getting_all_available_data(self):
reader = EconomyWatcherReader()
data = reader.get_data('current')
date_in_data_str = ['{:%Y%m%d}'.format(pd.to_datetime(date_)) for date_ in data.date.unique()]
self.assertIn('{:%Y%m%d}'.format(reader.EARLIEST_MONTH), date_in_data_str)
self.assertIn('{:%Y%m%d}'.format(reader.LATEST_MONTH), date_in_data_str)
self.assertGreater(len(date_in_data_str), 2)
# --------------------
# non-normal scenarios
# --------------------
def test_raise_exception(self):
reader = EconomyWatcherReader()
# invalid `kind_` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_= 'invalid', start=pd.datetime(2018, 1, 1))
# invalid `start` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(1945,1,1))
# invalid `end` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(2100, 1, 1))
# pass `start` > `end`
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(2018, 1, 1), end=pd.datetime(2017,1,1))
class TestReaderFuture(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kind_ = 'future'
# ----------------
# normal scenarios
# ----------------
def test_getting_data_for_one_month(self):
reader = EconomyWatcherReader()
data = reader.get_data(self.kind_, pd.datetime(2015,10,1), None)
# check column names
data.to_clipboard()
self.assertSetEqual(set(data.columns),
{'date', 'industry', 'region', 'is_tokyo', 'field', 'score', 'reason_sentence'})
def test_getting_data_for_multiple_months(self):
reader = EconomyWatcherReader()
data = reader.get_data('current', pd.datetime(2018, 1, 1), pd.datetime(2018,5,1))
# check data range
self.assertListEqual(
list(pd.date_range(pd.datetime(2018, 1, 1), pd.datetime(2018,5,1), freq='MS').values),
list(np.sort(data.date.unique()))
)
def test_getting_all_available_data(self):
reader = EconomyWatcherReader()
data = reader.get_data('current')
date_in_data_str = ['{:%Y%m%d}'.format(pd.to_datetime(date_)) for date_ in data.date.unique()]
self.assertIn('{:%Y%m%d}'.format(reader.EARLIEST_MONTH), date_in_data_str)
self.assertIn('{:%Y%m%d}'.format(reader.LATEST_MONTH), date_in_data_str)
self.assertGreater(len(date_in_data_str), 2)
# --------------------
# non-normal scenarios
# --------------------
def test_raise_exception(self):
reader = EconomyWatcherReader()
# invalid `kind_` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_= 'invalid', start=pd.datetime(2018, 1, 1))
# invalid `start` parameter
with self.assertRaises(ValueError):
reader.get_data(kind_='current', start=pd.datetime(1945,1,1))
# invalid `end` parameter
with self.assertRaises(ValueError):
            reader.get_data(kind_='current', start=pd.datetime(2100, 1, 1))
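# Added runner (assumption; not part of the original excerpt): lets the two test cases above
# be executed directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()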
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
    expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
import pandas as pd
from scipy.stats import chi2_contingency
import matplotlib.pyplot as plt
# Include all GENES, those containing Indels and SNVS (that's why I repeat this step of loading "alleles" dataframe) This prevents badly groupping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
GENES.sort()
df = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
dff = df.loc[df['from_general'].str.contains('Spain|LATAM')].copy()
N = 5001
N_espana = len(dff.loc[dff['from_general'] == 'Spain'])
N_latam = len(dff.loc[dff['from_general'] == 'LATAM'])
d_N_aux = {'Spain':N_espana, 'LATAM':N_latam}
chi_dict = dict()
chi2score_dict = dict()
alfreq_españa = dict()
alfreq_latam = dict()
alfreq_noalleles_españa = dict()
alfreq_noalleles_latam = dict()
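# Added illustration (toy numbers; the variable names below are invented): the loop that
# follows builds, per gene and allele, a 2x2 table of allele counts vs. remaining chromosome
# counts for Spain and LATAM and applies a chi-square test of independence to it.
_toy_table = pd.DataFrame({'N_alleles': [120, 85],
                           'No_alleles': [2 * 3000 - 120, 2 * 2000 - 85]},
                          index=['Spain', 'LATAM'])
_chi2, _p, _dof, _ = chi2_contingency(_toy_table)
print('toy 2x2 example: chi2={:.2f}, p={:.3g}'.format(_chi2, _p))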
for gene in GENES:
if gene != 'G6PD':
contigency= pd.crosstab(dff['from_general'], dff[gene])
posalleles = list(set([i.split('_')[1].split(',')[0].split('/')[1] for i in list(set(','.join(list(contigency.columns)).split(',')))]))#posalleles = list(set([i.split('_')[1].split(',')[0].split('/')[1] for i in list(contigency.columns)]))
for al in posalleles:
homozall = [i for i in list(contigency.columns) if '/'.join([al,al]) in i]
            heterozall = [i for i in list(contigency.columns) if (al in i) and not ('/'.join([al,al]) in i)]  # genotypes carrying the allele but not homozygous ('~' on a bool is always truthy, so 'not' is required)
origin = list((contigency[heterozall].sum(axis=1) + contigency[homozall].sum(axis=1)*2).index)
vals = list((contigency[heterozall].sum(axis=1) + contigency[homozall].sum(axis=1)*2).values)
auxdict = dict(zip(origin,vals))
            alfreq_españa[gene + '_' + al] = auxdict['Spain']
alfreq_latam[gene + '_' + al] = auxdict['LATAM']
            alfreq_noalleles_españa[gene + '_' + al] = 2*N_espana - auxdict['Spain']
alfreq_noalleles_latam[gene + '_' + al] = 2*N_latam - auxdict['LATAM']
cont = pd.DataFrame({'from_general':origin, 'N_alleles':vals})
cont['No_alleles'] = 2*cont['from_general'].map(d_N_aux)
cont['No_alleles'] = cont['No_alleles'] - cont['N_alleles']
cont = cont.set_index('from_general')
chi2, p, dof, ex = chi2_contingency(cont)
chi2score_dict[gene + '_' + al] = chi2
chi_dict[gene + '_' + al] = p
else:
dff_aux = dff.loc[dff['gender'] == 'M']
contigency_males= pd.crosstab(dff_aux['from_general'], dff_aux[gene])
posalleles_males = list(set([i.split('_')[1].split(',')[0].split('/')[1] for i in list(set(','.join(list(contigency_males.columns)).split(',')))]))#posalleles = list(set([i.split('_')[1].split(',')[0].split('/')[1] for i in list(contigency.columns)]))
dff_aux = dff.loc[dff['gender'] == 'F']
        contigency_females = pd.crosstab(dff_aux['from_general'], dff_aux[gene])
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
pytestmark = pytest.mark.single
def test_format_type(setup_path):
df = DataFrame({"A": [1, 2]})
with ensure_clean_path(setup_path) as path:
with HDFStore(path) as store:
store.put("a", df, format="fixed")
store.put("b", df, format="table")
assert store.get_storer("a").format_type == "fixed"
assert store.get_storer("b").format_type == "table"
def test_format_kwarg_in_constructor(setup_path):
# GH 13291
msg = "format is not a defined argument for HDFStore"
with tm.ensure_clean(setup_path) as path:
with pytest.raises(ValueError, match=msg):
HDFStore(path, format="table")
def test_api_default_format(setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_put(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError, match=msg):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError, match=msg):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(setup_path):
with ensure_clean_store(setup_path) as store:
index = Index([f"I am a very long string index: {i}" for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ [f"I am a very long string index: {i}" for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
msg = "Compression not supported on Fixed format stores"
with pytest.raises(ValueError, match=msg):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
msg = "Compression not supported on Fixed format stores"
with pytest.raises(ValueError, match=msg):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_put_mixed_type(setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
def test_store_index_types(setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
    # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
def test_column_multiindex(setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
msg = re.escape("cannot use a multi-index on axis [1] with data_columns ['A']")
with pytest.raises(ValueError, match=msg):
store.put("df2", df, format="table", data_columns=["A"])
msg = re.escape("cannot use a multi-index on axis [1] with data_columns True")
with pytest.raises(ValueError, match=msg):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo"))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
| _maybe_remove(store, "df") | pandas.tests.io.pytables.common._maybe_remove |
import datetime as dt
import numpy as np
import pandas as pd
from tqdm import tqdm
from .. import utils
from ..ashare_data_reader import AShareDataReader
from ..data_source.data_source import DataSource
from ..database_interface import DBInterface
from ..factor import CompactFactor
from ..tickers import FundTickers, StockTickerSelector
class FactorCompositor(DataSource):
def __init__(self, db_interface: DBInterface = None):
"""
Factor Compositor
This class composite factors from raw market/financial info
:param db_interface: DBInterface
"""
super().__init__(db_interface)
self.data_reader = AShareDataReader(db_interface)
def update(self):
"""ๆดๆฐๆฐๆฎ"""
raise NotImplementedError()
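# Added sketch (assumption; DemoCompositor and its factor logic are invented): concrete
# compositors are expected to override update() and work through self.data_reader /
# self.db_interface, as the classes below do.
class DemoCompositor(FactorCompositor):
    def update(self):
        close = self.data_reader.stock_close  # accessor provided by AShareDataReader
        print('would compute and persist a factor derived from', close)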
class IndexCompositor(FactorCompositor):
def __init__(self, index_composition_policy: utils.StockIndexCompositionPolicy, db_interface: DBInterface = None):
"""่ชๅปบๆๆฐๆถ็่ฎก็ฎๅจ"""
super().__init__(db_interface)
        self.table_name = '自合成指数'
self.policy = index_composition_policy
self.weight = None
if index_composition_policy.unit_base:
self.weight = (CompactFactor(index_composition_policy.unit_base, self.db_interface)
* self.data_reader.stock_close).weight()
self.stock_ticker_selector = StockTickerSelector(self.policy.stock_selection_policy, self.db_interface)
def update(self):
""" ๆดๆฐๅธๅบๆถ็็ """
price_table = '่ก็ฅจๆฅ่กๆ
'
start_date = self.db_interface.get_latest_timestamp(self.table_name, self.policy.start_date,
column_condition=('ID', self.policy.ticker))
end_date = self.db_interface.get_latest_timestamp(price_table)
dates = self.calendar.select_dates(start_date, end_date, inclusive=(False, True))
with tqdm(dates) as pbar:
for date in dates:
pbar.set_description(f'{date}')
ids = self.stock_ticker_selector.ticker(date)
if ids:
t_dates = [(self.calendar.offset(date, -1)), date]
if self.weight:
rets = (self.data_reader.forward_return * self.weight).sum().get_data(dates=t_dates, ids=ids)
else:
rets = self.data_reader.stock_return.mean(along='DateTime').get_data(dates=t_dates, ids=ids)
                    index = pd.MultiIndex.from_tuples([(date, self.policy.ticker)], names=['DateTime', 'ID'])
#!/usr/bin/env python3
import sys
import pandas as pd
import numpy as np
import json
from datetime import datetime
from hashlib import md5
import os.path as path
import argparse
import os.path as path
import pysolr
from uuid import uuid1
DEBUG = True
filename = 'output/PATH_005'
filename = 'output/PATH_147'
filename = 'output/PATH_016'
filename = 'output/PATH_024'
filename = 'output/PATH_008'
filename = 'output/PATH_090'
filename = 'output/AA_132'
filename = 'output/PATH_004'
filename = 'output/AA_003'
filename = 'output/HD_001'
filename = 'output/TR_002'
filename = 'output/PATH_004'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process trains services \
file to json')
parser.add_argument('inputfile', type=str, help='name of working \
timetable file to parse')
args = parser.parse_args()
filename = args.inputfile
DEBUG = False
filestub = path.basename(filename)
if DEBUG:
print(filename)
pd.set_option('display.max_columns', None)
ISO8601_DATE = datetime(1900, 1, 1)
DAY = pd.offsets.Day()
MONDAY = pd.offsets.Week(weekday=0)
def header_date(this_column):
return pd.to_datetime(this_column, format='%d%m%y').dt.strftime('%Y-%m-%d')
def wtt_date(this_column):
return pd.to_datetime(this_column, format='%y%m%d').dt.strftime('%Y-%m-%d')
def wtt_datetime(this_column):
return this_column.dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def wtt_time(this_column, format='%H%M%S'):
this_column = this_column.str.replace('H', '30').str.replace(' ', '00')
return pd.to_datetime(this_column, format=format)
def blank_columns(this_frame):
return [n for n in this_frame.select_dtypes(include=['object']).columns if this_frame[n].str.isspace().all() or (this_frame[n] == '').all()]
def strip_columns(this_frame):
return [n for n in this_frame.select_dtypes(include=['object']).columns if this_frame[n].str.isspace().any()]
def days_str(this_series):
return pd.to_datetime(this_series).apply(lambda v: '{:b}'.format(64 >> v.weekday()).zfill(7))
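# Added illustration (toy dates, not from the original script): days_str encodes the weekday
# as a 7-character bitmask, Monday first, e.g. Monday -> '1000000', Sunday -> '0000001'.
if DEBUG:
    print(days_str(pd.Series(['2023-01-02', '2023-01-08'])).tolist())  # ['1000000', '0000001']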
def get_dates(this_df):
no_idx = this_df['Date To'].str.isspace()
this_df.loc[no_idx, 'Days'] = days_str(this_df.loc[no_idx, 'Date From'])
this_df.loc[no_idx, 'Date To'] = this_df.loc[no_idx, 'Date From']
this_df['Date From'] = pd.to_datetime(this_df['Date From'], format='%y%m%d')
this_df['Date To'] = pd.to_datetime(this_df['Date To'], format='%y%m%d')
this_df['Dates'] = wtt_date(this_df['Date From'] - MONDAY) + '.' + wtt_date(this_df['Date To'] + MONDAY) + '.' + this_df['Days']
this_df['Date From'] = wtt_datetime(this_df['Date From'])
this_df['Date To'] = wtt_datetime(this_df['Date To'])
return this_df[['Date From', 'Date To', 'Dates', 'Days']]
def header_record(records):
"""process CIF file header record from 80-character line string"""
this_array = [[line[0:2], line[2:22], line[22:28], line[28:32], line[32:39], line[39:46], line[46:47], line[47:48], line[48:54], line[54:60]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'File Mainframe Identity', 'Date of Extract', 'Time of Extract', 'Current File Ref', 'Last File Ref', 'Bleed off Update Ind', 'Version', 'User Extract Start Date', 'User Extract End Date'])
this_frame['Extract Datetime'] = pd.to_datetime(this_frame['Time of Extract'] + this_frame['Date of Extract'], format='%H%M%d%m%y').dt.strftime('%Y-%m-%dT%H:%M:%SZ')
this_frame['Extract Interval'] = header_date(this_frame['User Extract Start Date']) + '/' + header_date(this_frame['User Extract End Date'])
this_frame = this_frame.drop(['User Extract Start Date', 'User Extract End Date', 'Time of Extract', 'Date of Extract'], axis=1)
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def tiploc_record(records):
"""return CIF file TIPLOC object from 80-character line string"""
this_array = [[line[0:2],line[2:9],line[9:11],line[11:17],line[17:18],line[18:44],line[44:49],line[49:53],line[53:56],line[56:72],line[72:79]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID','TIPLOC','Capitals Identification','Nalco','NLC check character','TPS Description','Stanox','PO MCP','CRS','Description','New TIPLOC'])
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def notes_record(records):
"""return CIF file train notes object en route object from 80-character line string"""
this_array = [[line[0:2],line[2:3],line[3:80]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID','Note Type','Note'])
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def association_record(records):
"""return CIF file train-association object from 80-character line string"""
this_array = [[line[0:2],line[2:3],line[3:9],line[9:15],line[15:21],line[21:27],line[27:34],line[34:36],line[36:37],line[37:44],line[44:45],line[45:46],line[47:48],line[79:80]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID','Transaction','Main UID','UID','Date From','Date To','Days','Category','Indicator','Location','Base Suffix','Location Suffix','Type','STP'])
this_frame[['Date From', 'Date To', 'Dates', 'Days']] = get_dates(this_frame)
#this_frame = this_frame.drop(['Date From', 'Date To'], axis=1)
this_frame = this_frame.drop(blank_columns(this_frame), axis=1)
#this_frame['id'] = [md5(x.encode()).hexdigest() for x in records]
return this_frame
def wtt_records(records):
this_array = [[line[0:2],line] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'Data'])
this_frame['key'] = [md5(x.encode()).hexdigest() for x in records]
this_frame.loc[this_frame['ID'] == 'BS', 'UUID'] = this_frame.loc[this_frame['ID'] == 'BS', 'key']
this_frame = this_frame.fillna(method='ffill')
return this_frame
def pa_record(this_df):
this_array = [['PA', line[2:3], line[3:9], line[9:15], line[15:21], line[21:28], line[79:80]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'Transaction','UID','Date From','Date To','Days','STP'])
this_frame[['Date From', 'Date To', 'Dates', 'Days']] = get_dates(this_frame)
#this_frame = this_frame.drop(['Date From', 'Date To'], axis=1)
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
return this_frame
def bs_record(this_df):
this_array = [[line[0:2], line[28:29], line[29:30], line[30:32], line[32:36], line[36:40], line[41:49], line[49:50], line[50:53], line[53:57], line[57:60], line[60:66], line[66:67], line[67:68], line[68:69], line[70:74], line[74:78]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'Bank Holiday Running', 'Train Status', 'Train Category', 'Headcode', 'NRS Headcode', 'Train Service', 'Portion Id', 'Power Type', 'Timing Load', 'Speed', 'Characteristics', 'Seating Class', 'Sleepers', 'Reservations', 'Catering', 'Service Branding'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
return this_frame
def bx_record(this_df):
this_array = [[line[0:2], line[6:11], line[11:13], line[13:14], line[14:22], line[22:23]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'UIC', 'ATOC', 'Applicable Timetable', 'RSID', 'Data Source'])
this_frame['UUID'] = this_df['UUID'].tolist()
return this_frame
def origin_location(this_df):
"""return CIF file depart from origin object, updated last reported time and
train operation duration from 80-character line string, the last reported time
and the train operation duration"""
this_array = [[line[0:2], line[2:9], line[9:10], line[10:15], line[15:19], line[19:22], line[22:25], line[25:27], line[27:29], line[29:41], line[41:43], line[43:46]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Schedule', 'Public Schedule', 'Platform', 'Line', 'Engineering Allowance', 'Pathing Allowance', 'Activity', 'Performance Allowance', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
this_frame['id'] += '.0'
this_frame['T'] = 'OD'
this_frame['index'] = this_df.index.tolist()
return this_frame
def intermediate_location(this_df):
"""return CIF file intermediate location object, updated last reported time
and train operation duration from 80-character line string, the last reported
time and the train operation duration"""
this_array = [[line[0:2], line[2:9], line[9:10], line[10:15], line[15:20], line[20:25], line[25:29], line[29:33], line[33:36], line[36:39], line[39:42], line[42:54], line[54:56], line[56:58], line[58:60], line[60:65]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Schedule Arrival', 'Schedule Departure', 'Schedule Pass', 'Public Arrival', 'Public Departure', 'Platform', 'Line', 'Path', 'Activity', 'Engineering Allowance', 'Pathing Allowance', 'Performance Allowance', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
this_frame['index'] = this_df.index.tolist()
idx_pass = (~this_frame['Schedule Pass'].str.isspace())
df_arrival = this_frame[~idx_pass].rename(columns={'Schedule Arrival': 'Schedule', 'Public Arrival': 'Public Schedule'})
df_arrival = df_arrival.drop(['Schedule Departure', 'Public Departure', 'Schedule Pass'], axis=1)
df_arrival['T'] = 'IA'
df_arrival['id'] += '.1'
df_departure = this_frame[~idx_pass].rename(columns={'Schedule Departure': 'Schedule', 'Public Departure': 'Public Schedule'})
df_departure = df_departure.drop(['Schedule Arrival', 'Public Arrival', 'Schedule Pass'], axis=1)
df_departure['T'] = 'ID'
df_departure['id'] += '.3'
df_pass = this_frame[idx_pass].rename(columns={'Schedule Pass': 'Schedule'})
df_pass = df_pass.drop(['Schedule Arrival', 'Public Arrival', 'Schedule Departure', 'Public Departure'], axis=1)
df_pass['Public Schedule'] = '0000'
df_pass['T'] = 'IP'
df_pass['id'] += '.2'
return pd.concat([df_arrival, df_departure, df_pass], sort=False)
def terminus_location(this_df):
this_array = [[line[0:2], line[2:9], line[9:10], line[10:15], line[15:19], line[19:22], line[22:25], line[25:27], line[37:40]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Schedule', 'Public Schedule', 'Platform', 'Path', 'Activity', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
this_frame['T'] = 'TA'
this_frame['id'] += '.4'
for key in ['Line', 'Engineering Allowance', 'Pathing Allowance', 'Performance Allowance']:
this_frame[key] = ''
this_frame['index'] = this_df.index.tolist()
return this_frame
def change_en_route(this_df):
"""return CIF file train change en route object from 80-character line string"""
this_array = [[line[0:2], line[2:9], line[9:10], line[10:12], line[12:16], line[16:20], line[21:29], line[29:30], line[30:33], line[33:37], line[37:40], line[40:46], line[46:47], line[47:48], line[48:49], line[50:54], line[54:58], line[62:67], line[67:75]] for line in this_df['Data']]
this_frame = pd.DataFrame(data=this_array, columns=['ID', 'TIPLOC', 'Suffix', 'Train Category', 'Headcode', 'NRS Headcode', 'Train Service', 'Portion Id', 'Power Type', 'Timing Load', 'Speed', 'Operating Characteristics', 'Seating Class', 'Sleepers', 'Reservations', 'Catering', 'Service Branding', 'UIC', 'Reserved'])
this_frame['UUID'] = this_df['UUID'].tolist()
this_frame['id'] = this_df['id'].tolist()
return this_frame
def get_wtt(this_df):
LO_frame = origin_location(this_df[this_df['ID'] == 'LO'])
LI_frame = intermediate_location(this_df[this_df['ID'] == 'LI'])
LT_frame = terminus_location(this_df[this_df['ID'] == 'LT'])
WTT = pd.concat([LO_frame, LI_frame, LT_frame], sort=False).sort_values(by=['index', 'id']).reset_index(drop=True)
WTT['Schedule'] = wtt_time(WTT['Schedule'])
idx_lo = (WTT['ID'] == 'LO')
WTT.loc[idx_lo, 'Offset'] = WTT.loc[idx_lo, 'Schedule']
WTT['Offset'] = WTT['Schedule'] - WTT['Offset'].fillna(method='ffill')
WTT.loc[WTT['Offset'] < pd.Timedelta(0), 'Offset'] += DAY
WTT['Offset'] += ISO8601_DATE
idx_ps = (WTT['Public Schedule'] != '0000')
WTT.loc[idx_ps, 'Public Schedule'] = wtt_time(WTT.loc[idx_ps, 'Public Schedule'], format='%H%M').dt.strftime('%H:%M')
WTT.loc[~idx_ps, 'Public Schedule'] = ''
WTT = WTT.drop('index', axis=1)
return WTT
def end_record(records):
this_array = [[line[0:2]] for line in records]
this_frame = pd.DataFrame(data=this_array, columns=['ID'])
#this_frame['id'] = [uuid1().hex for x in records]
return this_frame
SOLR_CONN = {}
SOLR_DATA = {}
def write_json(filename, this_df, key):
this_df = this_df.fillna('')
this_df.columns = [i.replace(' ', '_') for i in this_df.columns.to_list()]
this_buffer = ''
for _, r in this_df.iterrows():
u = {k: (v.rstrip() if isinstance(v, str) else v)
for k, v in r.to_dict().items() if isinstance(v, int) or v.rstrip() != ''}
this_buffer += json.dumps(u) + '\n'
if key == 'PATH':
u['id'] = uuid1().hex
with open(filename, 'w') as fout:
fout.write(this_buffer)
OP_FN = {'HD': header_record, 'TR': tiploc_record, 'AA': association_record, 'ZZ': end_record, 'PATH': wtt_records}
#OP_FN = {'HD': header_record, 'TI': tiploc_record} #, 'AA': association_record, 'ZZ': end_record, 'PATH': wtt_records}
M = filestub[-3:]
OUTPUT = []
#fin = sys.stdin
fin = open(filename, 'r')
KEY = None
CACHE = True
SERVICE = np.array([])
[OUTPUT.append(line.strip()) for line in fin]
ID = OUTPUT[0][0:2]
KEY = ID
if ID in ['BS', 'BX', 'CR', 'LI', 'LO', 'LT']:
KEY = 'PATH'
if ID in ['TI', 'TA', 'TD']:
KEY = 'TR'
df1 = OP_FN[KEY](OUTPUT)
df1['id'] = df1.index
df1['id'] = M + '.' + df1['id'].apply(lambda v: str(1 + v).zfill(8))
SA = pd.DataFrame()
if KEY == 'PATH':
idx_sa = (df1['ID'] == 'BS') | (df1['ID'] == 'BX') | (df1['ID'] == 'CR')
SA = df1.loc[idx_sa, ['ID', 'Data', 'UUID', 'id']]
WTT = get_wtt(df1)
lo_idx = (WTT['ID'] == 'LO')
lt_idx = (WTT['ID'] == 'LT')
df1 = WTT
df1['Schedule'] = df1['Schedule'].dt.strftime('%H:%M:%S')
df1['Offset'] = df1['Offset'].dt.strftime('%H:%M:%S')
if KEY in ['HD', 'ZZ']:
df1['seq'] = M
filename = 'storage/{}_{}.jsonl'.format(KEY, M)
write_json(filename, df1, KEY)
OUTPUT = []
if SA.empty:
if DEBUG:
1/0
sys.exit(0)
PA = pa_record(SA[SA['ID'] == 'BS']).set_index('UUID')
df2 = WTT.loc[lo_idx, ['Schedule', 'UUID']].set_index('UUID').rename(columns={'Schedule': 'Origin'})
PA = PA.join(df2)
df2 = WTT.loc[lt_idx, ['Schedule', 'Offset', 'UUID']].set_index('UUID').rename(columns={'Schedule': 'Terminus', 'Offset': 'Duration'})
PA = PA.join(df2)
df2 = WTT.loc[lt_idx, ['Schedule', 'Offset', 'UUID']].set_index('UUID')
df2 = ((pd.to_timedelta(df2['Schedule'])
import numpy as np
import pandas as pd
from nwp_cali import PrepareData
from sklearn.model_selection import train_test_split
from sklearn.decomposition import NMF
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from joblib import dump
import datetime
date = datetime.datetime.now().strftime('%Y%m%d')
from time import perf_counter
path = '/home/users/aslee/CaCO3_NWP/'
y_tuple = {}
measurement = 'CaCO3%'
print('Begin NMF+SVR: {}'.format(measurement))
start = perf_counter()
prepare = PrepareData(measurement=measurement)
X, y = prepare.produce_Xy(prepare.select_data())
X_train, X_dev, y_train, y_dev = train_test_split(
X, y, test_size = 0.2, shuffle = True, random_state = 24)
# specified to the measurement
# the max_iter is increased from 8000 to 10000 to avoid faled convergence
pipe = make_pipeline(NMF(n_components=4, max_iter=10000, random_state=24),
SVR(C=1e2, gamma=1e3))
pipe.fit(X_train, np.log(y_train))
dump(pipe,
'{}models/{}_nmf+svr_model_{}.joblib'.format(
path, measurement[:-1].lower(), date))
print("The computation takes {} mins.".format(
(perf_counter() - start)/60))
y_df = pd.DataFrame([y_dev, np.exp(pipe.predict(X_dev))], index=[measurement, '{}_pred'.format(measurement)]).T
######################## Change measurement ###########################
measurement = 'TOC%'
print('Begin NMF+SVR: {}'.format(measurement))
start = perf_counter()
prepare = PrepareData(measurement=measurement)
X, y = prepare.produce_Xy(prepare.select_data())
X_train, X_dev, y_train, y_dev = train_test_split(
X, y, test_size = 0.2, shuffle = True, random_state = 24)
# specified to the measurement
pipe = make_pipeline(NMF(n_components=13, max_iter=8000, random_state=24),
SVR(C=10, gamma=1e3))
pipe.fit(X_train, np.log(y_train))
dump(pipe,
'{}models/{}_nmf+svr_model_{}.joblib'.format(
path, measurement[:-1].lower(), date))
print("The computation takes {} mins.".format(
(perf_counter() - start)/60))
tmp_df = pd.DataFrame([y_dev, np.exp(pipe.predict(X_dev))], index=[measurement, '{}_pred'.format(measurement)]).T
pd.concat([y_df, tmp_df], axis=1, join='outer')
"""<2018.07.24>"""
import pandas as pd
import numpy as np
s= pd.Series([9904312,3448737,2890451,2466052],index=["Seoul","Busan","Incheon","Daegue"])
#print(s)
#print(s.index)
#print(s.values)
#s.name="์ธ๊ตฌ"
#s.index.name="๋์"
#print(s.index.name)
#์๋ฆฌ์ฆ์ ์ฐ์ฐ์ ํ๋ฉด value์๋ง ์ ์ฉ๋๋ค
#print(s/100000)
#print(s[(250e4<s)&(s<500e4)])
#Pandas์์๋ ๋ค์ ๋์ค๋ ์ซ์๊น์ง ํฌํจํ๋ฏ๋ก ์ฃผ์ํด์ผํ๋ค.
#print(s[:3])
#s0=pd.Series(range(3),index=["a","b","c"])
#print(s0)
#print("์์ธ" in s)
#for k,v in s.items():
# print("%s=%d"%(k,v))
s2=pd.Series({"Seoul":9631482,"Busan":3393191,"Incheon":2632035,"Daejoen":1490158})
#print(s2)
# Dictionary elements are unordered, so the order of data in a Series built from a dict is not guaranteed.
# To fix the order, specify the index explicitly as a list.
s2=pd.Series({"Seoul":9631482,"Busan":3393191,"Incheon":2632035,"Daejeon":1490158},
index=["Busan","Seoul","Incheon","Daejeon"])
#print(s2)
"""์ธ๋ฑ์ค ๊ธฐ๋ฐ ์ฐ์ฐ"""
ds=s-s2
#print(ds)
#print(s.values-s2.values)
#print(ds.notnull())
#print(ds[ds.notnull()])
#rs=(s-s2)/s2*100
#rs=rs[rs.notnull()]
#print(rs)
"""๋ฐ์ดํฐ ์์ """
#rs["Busan"]=1.63
#print(rs)
##๋ฐ์ดํฐ ์ถ๊ฐ
#rs["Daegue"]=1.41
#print(rs)
##๋ฐ์ดํฐ ์ญ์
#del rs["Seoul"]
#print(rs)
#volleyball=pd.Series({"receive":76.1,"spike":42.7,"toss":65.3,"dig":22.7,"attack":52.3,"defense":42.75},
# index=["attack","spike","defense","dig","receive","toss"])
#volleyball.name="KEPCO"
#print(volleyball)
#soccer=pd.Series({"pass":65.2,"counterattack":24.5,"defense":67.2,"attack":45.2,"shot":42.2,"tackle":12.4},
# index=["attack","counterattack","shot","pass","defense","tackle"])
#soccer.name="Mancity"
#print(soccer)
#log=volleyball-soccer
#print(log)
"""๋ฐ์ดํฐํ๋ ์ ํด๋์ค"""
data={
"2015": [9904312, 3448737, 2890451, 2466052],
"2010": [9631482, 3393191, 2632035, 2431774],
"2005": [9762546, 3512547, 2517680, 2456016],
"2000": [9853972, 3655437, 2466338, 2473990],
"์ง์ญ":["์๋๊ถ","๊ฒฝ์๊ถ","์๋๊ถ","๊ฒฝ์๊ถ"],
"2010-2015 ์ฆ๊ฐ์จ":[0.0283,0.0163,0.0982,0.0141]
}
columns=["์ง์ญ","2015","2010","2005","2000","2010-2015 ์ฆ๊ฐ์จ"]
index=["์์ธ","๋ถ์ฐ","์ธ์ฒ","๋๊ตฌ"]
df=pd.DataFrame(data,index=index,columns=columns)
#print(df)
# Attach names to the row index and the column index
df.index.name="City"
df.columns.name="Attribute"
#print(df)
result={
"Point":[100,81,77,75,70],
"Win":[32,25,23,21,21],
"Draw":[4,6,8,12,7],
"Lose":[2,7,7,5,10],
"Goal difference":[79,40,38,46,24]}
items=["Point","Win","Draw","Lose","Goal difference"]
Team_name=["MCI","MUN","TOT","LIV","CHE"]
league=pd.DataFrame(result,index=Team_name,columns=items)
#print(league)
# A DataFrame can be transposed by appending .T.
#print(league.T)
#print(league[["Win","Draw","Lose"]])
df2=pd.DataFrame(np.arange(12).reshape(3,4))
#print(df2)
df["2010-2015 ์ฆ๊ฐ์จ"]=df["2010-2015 ์ฆ๊ฐ์จ"]*100
#print(df)
#print(df[1:3])
data={
"Korea":[80,90,70,30],
"English":[90,70,60,40],
"Math":[90,60,80,70],}
columns=["Korea","English","Math"]
index=["Kim","Lee","Park","Choi"]
df=pd.DataFrame(data,columns=columns,index=index)
#print(df)
#1. Show every student's math scores as a Series.
#print(df[["Math"]])
#2. Show every student's Korean and English scores as a DataFrame.
#print(df[["English","Korea"]])
#3. Add each student's average score across subjects as a new column.
# axis=1 means the mean is computed across each row (i.e. over the columns).
avg=df.mean(axis=1)
df["Average"]=avg
#print(df)
#4. Change Choi's English score to 80 and recompute the average.
#df.loc["Choi","English"]=80
#print(df)
#avg=df.mean(axis=1)
#df["Average"]=avg
#print(df)
# To do: still needs to be worked through.
# Show Kim's scores as a DataFrame.
#print(df.iloc[0])
# Show Park's scores as a Series.
#print(df.iloc[2])
"""๋ฐ์ดํฐํ๋ ์ ์ธ๋ฑ์"""
box=pd.DataFrame(np.arange(10,22).reshape(3,4),
index=["r1","r2","r3"],
columns=["c1","c2","c3","c4"])
#print(box)
"""loc์ธ๋ฑ์"""
#df.loc[ํ์ธ๋ฑ์ค(row),์ด์ธ๋ฑ์ค(column)]์ ๊ฐ์ ํํ๋ก ์ฌ์ฉํ๋ค.
#print(box.loc["r1","c2"])
#print(box.loc["r1":,"c3"])
#print(box.loc["r2":,"c2":])
# Select only the rows that satisfy a particular condition
#print(box.loc[box.c1>10])
#print(box.loc["r1",:])
#print(box[:1])
# Adding a column
#box["c5"]=[14,18,22]
#print(box)
# Adding a row
#box.loc["r4"]=[90,91,92,93,94]
#print(box)
# Adding / removing a row
#box.loc["r5"]=[100,101,102,103,104]
#print(box)
#box=box.drop("r5")
#print(box)
box2=pd.DataFrame(np.arange(10,26).reshape(4,4),
columns=np.arange(1,8,2))
#print(box2)
#print(box2.loc[1,1])
#print(box2.loc[1:2,:])
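# Added example (illustration only, not from the original notes): loc also accepts a row
# slice together with a list of column labels, returning a sub-DataFrame.
print(box.loc["r2":"r3",["c1","c3"]])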
"""iloc์ธ๋ฑ์"""
#์ ์ ์ธ๋ฑ์ค๋ง ๋ฐฉ๋๋ค
#box์ 0ํ 1์ด ๋ฐ์ดํฐ
#print(box.iloc[0,1])
#print(box.iloc[:2,2])
"""<2018.07.25>"""
"""๋ฐ์ดํฐ ๊ฐฏ์ ์ธ๊ธฐ"""
#10ํ์ ๋ฐ์ดํฐ ์์ฑ
s=pd.Series(range(10))
#3๋ฒ ์ธ๋ฑ์ค์ NAN ์์ฑ
s[3]=np.nan
#print(s)
#count๋ NAN์ ๊ฐ์๋ฅผ ์ธ์ง ์๋๋ค.
#print("s์ NAN์ ์ ์ธํ ๊ฐฏ์๋ {}".format(s.count()))
np.random.seed(2)
df=pd.DataFrame(np.random.randint(5,size=(4,4)),dtype=float)
df.iloc[2,3]=np.nan
#print(df)
# Counts are reported separately for each column, so missing data is easy to spot.
#print(df.count())
"""์ฐ์ต ๋ฌธ์ 1
๋ค์ ๋ช
๋ น์ผ๋ก ํ์ดํ๋ํธ ์น๊ฐ ๋ฐ์ดํฐ๋ฅผ ๋ฐ์ดํฐํ๋ ์์ผ๋ก ์ฝ์ด์จ๋ค. ์ด ๋ช
๋ น์ ์คํํ๋ ค๋ฉด seaborn ํจํค์ง๊ฐ ์ค์น๋์ด ์์ด์ผ ํ๋ค.
import seaborn as sns
titanic = sns.load_dataset("titanic")
ํ์ดํ๋ํธ ์น๊ฐ ๋ฐ์ดํฐ์ ๋ฐ์ดํฐ ๊ฐ์ ๊ฐ ์ด๋ง๋ค ๊ตฌํด๋ณธ๋ค.
"""
import seaborn as sns
titanic=sns.load_dataset("titanic")
#print(titanic["age"].value_counts())
#print(titanic.head())
#print(titanic.count())
"""์นดํ
๊ณ ๋ฆฌ ๊ฐ ์ธ๊ธฐ"""
np.random.seed(1)
s2=pd.Series(np.random.randint(6,size=100))
#print(s2)
#tail() shows only the last few entries
#print(s2.tail())
# When the values of a Series are integers, strings, or categorical, value_counts() reports how many of each value exist.
#print(s2.value_counts())
"""์ ๋ ฌ"""
#์ธ๋ฑ์ค ๊ธฐ์ค ์ ๋ ฌ
#print(s2.value_counts().sort_index())
#Value ๊ธฐ์ค ์ ๋ ฌ
#print(s2.value_counts().sort_values())
#NaN๊ฐ์ด ์๋ ๊ฒฝ์ฐ์๋ ์ ๋ ฌํ๋ฉด NAN๊ฐ์ด ๊ฐ์ฅ ๋์ค์ ๋์จ๋ค.
ran=pd.Series(range(10))
ran[8]=np.nan
#print(ran)
#print(ran.sort_values())
# To sort in descending order (largest first), set ascending=False
#print(ran.sort_values(ascending=False))
#sort_values on a DataFrame takes a by argument that specifies the column(s) used as the sort key.
#print(df.sort_values(by=1))
#print(df.sort_values(by=[1,2]))
"""
์ฐ์ต ๋ฌธ์ 2
ํ์ดํ๋ํธ ์น๊ฐ์ค ์ฑ๋ณ(sex) ์ธ์์, ๋์ด๋ณ(age) ์ธ์์, ์ ์ค๋ณ(class) ์ธ์์, ์ฌ๋ง/์์กด(alive) ์ธ์์๋ฅผ ๊ตฌํ๋ผ.
"""
#print("Titanic์ ํ์น๊ฐ ์ฑ๋ณ ๊ตฌ์ฑ์ {}".format(titanic["sex"].value_counts()))
#print("Titanic์ ํ์น๊ฐ ์ฐ๋ น๋ณ ๊ตฌ์ฑ์ {}".format(titanic["age"].value_counts().head()))
#print("Titanic์ ์ ์ค๋ณ ์ธ์ ๊ตฌ์ฑ์ {}".format(titanic["class"].value_counts()))
#print("Titanic์ ์์กด ์ธ์์๋ {}".format(titanic["alive"].value_counts()))
"""ํ/์ด ํฉ๊ณ"""
#df2=pd.DataFrame(np.random.randint(10,size=(4,8)))
#print(df2)
##ํ๋ณ๋ก ํฉ๊ณ ๊ตฌํ๊ธฐ
#print(df2.sum(axis=1))
##์ด๋ณ๋ก ํฉ๊ณ ๊ตฌํ๊ธฐ
#print(df2.sum(axis=0))
#print(df2.sum())
#df2["RowSum"]=df2.sum(axis=1)
#print(df2)
#df2.loc["ColTotal",:]=df2.sum()
#print(df2)
"""apply๋ณํ"""
#ํ์ด๋ ์ด ๋จ์๋ก ๋ ๋ณต์กํ ์ฒ๋ฆฌ๋ฅผ ํ๊ณ ์ถ์ ๋๋ apply ๋ฉ์๋๋ฅผ ์ฌ์ฉํ๋ค.
#์ธ์๋ก ํ ๋๋ ์ด ์ ๋ฐ๋ ํจ์๋ฅผ apply ๋ฉ์๋์ ์ธ์๋ก ๋ฃ์ผ๋ฉด ๊ฐ ์ด(๋๋ ํ)์ ๋ฐ๋ณตํ์ฌ ๊ทธ ํจ์์ ์ ์ฉ์ํจ๋ค.
df3=pd.DataFrame({
'A':[1,3,4,3,4],
'B':[2,3,1,2,3],
'C':[1,5,2,4,4]
})
#print(df3)
# To get the difference between each column's maximum and minimum, pass a lambda like the one below.
#print("Max-min difference per column:\n{}".format(df3.apply(lambda x:x.max()-x.min())))
# To apply the function to each row instead, use the axis=1 argument.
#print("Max-min difference per row:\n{}".format(df3.apply(lambda x:x.max()-x.min(),axis=1)))
# To see how often each value occurs in each column, pass value_counts.
#print(df3.apply(pd.value_counts))
# NaN values can be replaced with a desired value using the fillna method.
# The astype method can also convert the dtype of the whole result.
#print(df3.apply(pd.value_counts).fillna(0).astype(int))
"""์ค์ ๊ฐ์ ์นดํ
๊ณ ๋ฆฌ ๊ฐ์ผ๋ก ๋ณํ(์ผ์ ๋ฒ์์ ๋ฐ์ดํฐ ๋ฃ๊ธฐ)"""
#cut:์ค์ ๊ฐ์ ๊ฒฝ๊ณ์ ์ ์ง์ ํ๋ ๊ฒฝ์ฐ
#qcut:๊ฐฏ์๊ฐ ๋๊ฐ์ ๊ตฌ๊ฐ์ผ๋ก ๋๋๋ ๊ฒฝ์ฐ
ages=[0,2,10,21,23,37,61,20,41,32,100]
bins=[1,15,25,35,60,99]
labels=["๋ฏธ์ฑ๋
์","์ฒญ๋
","์ค๋
","์ฅ๋
","๋
ธ๋
"]
cats=pd.cut(ages,bins,labels=labels)
#print(cats)
df4=pd.DataFrame(ages,columns=["ages"])
df4["age_cat"]=pd.cut(df4.ages,bins,labels=labels)
#print(df4)
# The qcut command does not take explicit bin boundaries; it splits the data into the requested number of bins with equal counts.
# For example, the code below splits 1000 data points into 4 bins, each containing 250 points.
data=np.random.randn(1000)
cats=pd.qcut(data,4,labels=["Q1","Q2","Q3","Q4"])
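# Added check (illustration; not in the original notes): each of the four qcut bins should
# contain exactly 250 of the 1000 observations.
print(pd.value_counts(cats))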
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This python file evaluates the Machine learning models from the SKlearn libraries
using cross-validation method and output the test score to select top 5 models.
"""
import pandas
import csv
import numpy as np
import time
import signal
import warnings
from sklearn.model_selection import cross_validate
from sklearn.metrics import make_scorer
# This function (taken from the web) can be used to terminate other functions that exceeds the time we give to run in seconds
def deadline(timeout, *args):
def decorate(f):
def handler(signum, frame):
raise Exception
        def new_f(*args):
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(timeout)
            try:
                return f(*args)
            finally:
                signal.alarm(0)  # always clear the pending alarm once f returns or raises
new_f.__name__ = f.__name__
return new_f
return decorate
'''
I have tested using both classifiers and regressors. Classifiers are not able to
give us more than one prediction. Therefore, a dropdown list cannot be obtained with it.
Only regressors are hence used here. Most Regressors as base estimators do not support
multiple outputs. Therefore sklearn's multioutput meta-estimators are used to make them
support multioutput feature.
'''
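# Added sketch (assumption; toy data, not from the original experiments): a multioutput
# meta-estimator wraps a single-output base regressor, fitting one copy per target column.
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.multioutput import MultiOutputRegressor
_X_toy = np.random.rand(20, 3)
_Y_toy = np.random.rand(20, 4)             # four targets -> four wrapped Ridge models
_sketch = MultiOutputRegressor(Ridge()).fit(_X_toy, _Y_toy)
print(_sketch.predict(_X_toy[:2]).shape)   # (2, 4)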
#importing all regressor multioutput meta-estimators
from sklearn.multioutput import RegressorChain
from sklearn.multioutput import MultiOutputRegressor
#importing all ensembles regressor estimators
from sklearn.ensemble.weight_boosting import AdaBoostRegressor
from sklearn.ensemble.bagging import BaggingRegressor
from sklearn.ensemble.forest import ExtraTreesRegressor
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.ensemble.forest import RandomForestRegressor
#importing all base regressor estimators
from sklearn.linear_model.bayes import ARDRegression
from sklearn.linear_model.bayes import BayesianRidge
from sklearn.naive_bayes import BernoulliNB
from sklearn.cross_decomposition.cca_ import CCA
from sklearn.tree.tree import DecisionTreeRegressor
from sklearn.linear_model.coordinate_descent import ElasticNet
from sklearn.tree.tree import ExtraTreeRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.gaussian_process.gpr import GaussianProcessRegressor
from sklearn.linear_model.huber import HuberRegressor
from sklearn.neighbors.regression import KNeighborsRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.semi_supervised.label_propagation import LabelPropagation
from sklearn.semi_supervised.label_propagation import LabelSpreading
from sklearn.linear_model.least_angle import Lars
from sklearn.linear_model.coordinate_descent import Lasso
from sklearn.linear_model.least_angle import LassoLars
from sklearn.linear_model.least_angle import LassoLarsIC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model.base import LinearRegression
from sklearn.svm.classes import LinearSVR
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from sklearn.linear_model.coordinate_descent import MultiTaskElasticNet
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.svm.classes import NuSVR
from sklearn.linear_model.omp import OrthogonalMatchingPursuit
from sklearn.cross_decomposition.pls_ import PLSCanonical
from sklearn.cross_decomposition.pls_ import PLSRegression
from sklearn.linear_model.passive_aggressive import PassiveAggressiveRegressor
from sklearn.linear_model.perceptron import Perceptron
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model.ransac import RANSACRegressor
from sklearn.neighbors.regression import RadiusNeighborsRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.linear_model.theil_sen import TheilSenRegressor
# multioutputs = [MultiOutputRegressor, RegressorChain]
# ensembles = [AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor, RandomForestRegressor]
# bases = [ARDRegression, BayesianRidge, BernoulliNB, CCA, DecisionTreeRegressor, ElasticNet, ExtraTreeRegressor, GaussianNB, GaussianProcessRegressor, HuberRegressor, KNeighborsRegressor, KernelRidge, LabelPropagation, LabelSpreading, Lars, Lasso, LassoLars, LassoLarsIC, LinearDiscriminantAnalysis, LinearRegression, LinearSVR, LogisticRegression, MLPRegressor, MultiTaskElasticNet, MultiTaskLasso, MultinomialNB, NearestCentroid, NuSVR, OrthogonalMatchingPursuit, PLSCanonical, PLSRegression, PassiveAggressiveRegressor, Perceptron, QuadraticDiscriminantAnalysis, RANSACRegressor, RadiusNeighborsRegressor, Ridge, SGDRegressor, SVR, TheilSenRegressor]
multioutputs = [MultiOutputRegressor,RegressorChain]
ensembles = [AdaBoostRegressor, BaggingRegressor]
bases = [ARDRegression, BayesianRidge, BernoulliNB, CCA, DecisionTreeRegressor, ElasticNet, ExtraTreeRegressor, GaussianNB, GaussianProcessRegressor, HuberRegressor, KNeighborsRegressor, KernelRidge, LabelPropagation, LabelSpreading, Lars, Lasso, LassoLars, LassoLarsIC, LinearDiscriminantAnalysis, LinearRegression, LinearSVR, LogisticRegression, MLPRegressor, MultiTaskElasticNet, MultiTaskLasso, MultinomialNB, NearestCentroid, NuSVR, OrthogonalMatchingPursuit, PLSCanonical, PLSRegression, PassiveAggressiveRegressor, Perceptron, QuadraticDiscriminantAnalysis, RANSACRegressor, RadiusNeighborsRegressor, Ridge, SGDRegressor, SVR, TheilSenRegressor]
# This is the custom scorer defined.
# The lower the score the better. A score of 1 would be the best possible score.
def lowest_correct(trues, preds):
num_of_options = len(trues) # number of class labels
drop_down_options = list(reversed(np.argsort(preds))) # Based on the regressor values, highest (most probable) value first
correct_options = [i for i in range(num_of_options) if trues[i]==1] # get the index of the correct label
return min([drop_down_options.index(correct_option) for correct_option in correct_options]) + 1 #check how far is that index in the dropdown list and return that value
def average_lowest_correct(list_of_trues, list_of_preds):
length = len(list_of_trues) # number of data points
return np.mean([lowest_correct(list(list_of_trues.iloc[i]), list(list_of_preds[i])) for i in range(length)])
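# Added worked example (illustration only): with one-hot truth [0, 1, 0] and regressor scores
# [0.2, 0.9, 0.4] the correct label is ranked first in the drop-down, so the score is 1; with
# truth [1, 0, 0] it is ranked last, giving the worst possible score of 3.
print(lowest_correct([0, 1, 0], [0.2, 0.9, 0.4]))  # -> 1
print(lowest_correct([1, 0, 0], [0.2, 0.9, 0.4]))  # -> 3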
# This is the cross validate function that fits the data on all multioutput-base estimators and evaluate the model predictions based on the scorer function defined.
@deadline(180) # terminate running instance of this function if it exceeds 50 seconds
def cv_base(multioutput, base, xs, ys):
#Here we cross_validate the model. Cross validate split the data set into train and test and return the score and time of fitting etc.
temp = cross_validate(multioutput(base()), xs, ys, scoring=make_scorer(average_lowest_correct), n_jobs=-1, cv=5) # 5-fold cross validation
return [multioutput.__name__, None, base.__name__, (np.sum(temp['score_time'])).round(2), np.mean(temp['test_score']).round(2)]
# This is the cross validate function that fits the data on all multioutput-ensemble estimators and evaluate the model predictions based on the scorer function defined.
@deadline(180)
def cv_ensemble(multioutput, ensemble, xs, ys):
temp = cross_validate(multioutput(ensemble()), xs, ys, scoring=make_scorer(average_lowest_correct), n_jobs=-1, cv=5)
return [multioutput.__name__, ensemble.__name__, None, (np.sum(temp['score_time'])).round(2), np.mean(temp['test_score']).round(2)]
# This funtion cross-validate all the ensemble-base combinations
@deadline(180)
def cv_ensemble_base(multioutput, ensemble, base, xs, ys):
temp = cross_validate(multioutput(ensemble(base())), xs, ys, scoring=make_scorer(average_lowest_correct), n_jobs=-1, cv=5)
return [multioutput.__name__, ensemble.__name__, base.__name__, (np.sum(temp['score_time'])).round(2), np.mean(temp['test_score']).round(2)]
# Process step
X_ps = pandas.read_csv('../out/train/X_PS_train.csv', delimiter=',', encoding='latin-1')
Y_ps = pandas.read_csv('../out/train/Y_PS_train.csv', delimiter=',', encoding='latin-1')
ps_models = pandas.DataFrame(columns=['Multioutput', 'Ensemble', 'Base', 'Score time (s)', 'Score (lower the better)'])
row = 0
for multioutput in multioutputs:
for base in bases:
print ("Cross-validating the ",base.__name__, " model\n")
try:
results = cv_base(multioutput, base, X_ps, Y_ps)
for i in range(5):
ps_models.at[row, ps_models.columns[i]] = results[i]
row += 1
except Exception:
pass
print ("------ All bases are now cross-validated ------\n")
for ensemble in ensembles:
print ("Cross-validating the ",ensemble.__name__, " model\n")
try:
results = cv_ensemble(multioutput, ensemble, X_ps, Y_ps)
for i in range(5):
ps_models.at[row, ps_models.columns[i]] = results[i]
row += 1
except Exception:
pass
print ("-------All ensembles are now cross-validated ------\n")
for ensemble in ensembles:
for base in bases:
print ("Cross-validating the ",ensemble.__name__," + ",base.__name__, "combination\n")
try:
results = cv_ensemble_base(multioutput, ensemble, base, X_ps, Y_ps)
for i in range(5):
ps_models.at[row, ps_models.columns[i]] = results[i]
row += 1
except Exception:
pass
ps_models.to_csv('../out/5_PS_models_evaluation.csv', encoding='utf-8', index=False, quoting=csv.QUOTE_NONNUMERIC)
# Problem type
X_pt = pandas.read_csv('../out/train/X_PT_train.csv', delimiter=',', encoding='latin-1')
# write_Crosswalk_USGS_NWIS_WU.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
# <EMAIL>
"""
Create a crosswalk linking the downloaded USGS_NWIS_WU to NAICS_12. Created by selecting unique Activity Names and
manually assigning to NAICS
"""
import pandas as pd
from flowsa.common import datapath
from scripts.common_scripts import unique_activity_names, order_crosswalk
def assign_naics(df):
"""manually assign each ERS activity to a NAICS_2012 code"""
df.loc[df['Activity'] == 'Aquaculture', 'Sector'] = '1125'
# df.loc[df['Activity'] == 'Commercial', 'Sector'] = ''
df.loc[df['Activity'] == 'Domestic', 'Sector'] = 'F01000'
df.loc[df['Activity'] == 'Hydroelectric Power', 'Sector'] = '221111'
df.loc[df['Activity'] == 'Industrial', 'Sector'] = '1133'
df = df.append(pd.DataFrame([['Industrial', '23']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '31']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '32']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '33']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '48839']], columns=['Activity', 'Sector']), sort=True)
    df = df.append(pd.DataFrame([['Industrial', '5111']], columns=['Activity', 'Sector']), sort=True)
"""
Test our groupby support based on the pandas groupby tests.
"""
#
# This file is licensed under the Pandas 3 clause BSD license.
#
from sparklingpandas.test.sp_test_case import \
SparklingPandasTestCase
from pandas import bdate_range
from pandas.core.index import Index, MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import assert_frame_equal
from pandas import compat
import pandas.util.testing as tm
import unittest2
import numpy as np
try:
# rands was moved to util.testing in pandas 0.15
from pandas.core.common import rands # pylint: disable=no-name-in-module
except ImportError:
from pandas.util.testing import rands
class PandasGroupby(SparklingPandasTestCase):
def setUp(self):
"""
Setup the dataframes used for the groupby tests derived from pandas
"""
self.date_rng = bdate_range('1/1/2005', periods=250)
self.string_idx = Index([rands(8).upper() for x in range(250)])
self.group_id = Series([x[0] for x in self.string_idx],
index=self.string_idx)
self.group_dict = dict((key, value) for key, value in
compat.iteritems(self.group_id))
self.col_idx = Index(['A', 'B', 'C', 'D', 'E'])
rand_matrix = np.random.randn(250, 5)
self.string_matrix = DataFrame(rand_matrix, columns=self.col_idx,
index=self.string_idx)
self.time_matrix = DataFrame(rand_matrix, columns=self.col_idx,
index=self.date_rng)
self.time_series = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.pd_df_foobar = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(np.random.randn(8),
dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
super(self.__class__, self).setUp()
def test_first_last_nth(self):
# tests for first / last / nth
ddf = self.psc.from_pd_data_frame(self.pd_df_foobar)
assert_frame_equal(ddf.collect(), self.pd_df_foobar)
grouped = self.psc.from_pd_data_frame(self.pd_df_foobar).groupby('A')
first = grouped.first().collect()
expected = self.pd_df_foobar.ix[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0).collect()
assert_frame_equal(nth, expected)
last = grouped.last().collect()
expected = self.pd_df_foobar.ix[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
| assert_frame_equal(last, expected) | pandas.util.testing.assert_frame_equal |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
'''
run this file from root folder:
python3 datasets/process_data.py datasets/messages.csv datasets/categories.csv datasets/DisasterResponse.db
'''
def load_data(messages_filepath, categories_filepath):
"""
    PARAMETERS:
messages_filepath - filepath for messages
categories_filepath - filepath for categories
RETURN:
df - merged messages and categories DataFrame
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = | pd.concat([messages, categories], axis=1) | pandas.concat |
import pickle
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce
import sys
import time
from sklearn.decomposition import PCA
from sklearn import cluster as sklearn_clustering
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import data_extract as dext
from heimat import reco
import settings
import cache_manager as cmng
import portals_urls
import data_cleaning
pca_u, G = None, None
pca_a, A_star = None, None
A, U, MAP, txtclf = None, None, None, None
M = None
CLF_MLP = None
CLF_DBSCAN = None
D, PAPERS_LIST = None, None
UVT = None
papers_total, objects_articles_dict, objects_df = cmng.load_work()
pca_G_ncomponents = settings.pca_G_ncomponents
pca_A_ncomponents = settings.pca_A_ncomponents
mlp_iter = settings.mlp_iter
funksvd_iter = settings.funksvd_iter
funksvd_latent_features = settings.funksvd_latent_features
pd.set_option("max_rows", 50)
np.random.seed()
def dist_em(xs1, xs2):
euklid = np.linalg.norm(xs1 - xs2)
manhattan = sum(abs(e - s) for s, e in zip(xs1, xs2))
return euklid, manhattan
def show_articles_by_group(group=0):
"""
Shows paper_id corresponding to objects in some particular group
:param group:
:return:
"""
global U
r = U[U.group == group]
articles = []
for paper_id in r['OBJID'].values:
articles.extend(MAP[paper_id])
for paper_id in list(set(articles)):
print("--------------------------------------------------")
dext.get_paper(paper_id)
def show_first3_components(matrix, title="", start_at_index=0):
"""
:param matrix: G or A_star matrices
:param title:
:param start_at_index: Depending on whether matrix is G or A_star, start_at_index differs (1, respectively 0)
:return:
"""
plt.figure(figsize=(10, 8))
ax = plt.axes(projection='3d')
i, j, k = [start_at_index + t for t in range(0, 3)]
ax.scatter3D(matrix[:, i], matrix[:, j], matrix[:, k], s=8, cmap='Greens', edgecolors='k')
if title:
plt.title(title)
plt.show()
plt.close()
time.sleep(1)
def gen_matrix_G(ncomp=25):
"""
matrix G of principal components for the object representation
- generates the PCA form of matrix U
- adds the OBJID value on the first column
:param ncomp:
:return:
"""
global pca_u, G, U
print("\n[x] PCA for matrix G:")
pca_u = PCA(n_components=ncomp)
U_matrix = U[list(filter(lambda x: x not in ["OBJID", "group"], U.columns))]
G = pca_u.fit_transform(U_matrix.fillna(U_matrix.mean()).values)
G = np.append(U['OBJID'].values.reshape(U.shape[0], 1), G, axis=1)
print("[x] Explained variance ratio:")
print(pca_u.explained_variance_ratio_)
print("[x] Singular values:")
print(pca_u.singular_values_)
print("[x] Sum of variance:")
print(np.sum(pca_u.explained_variance_ratio_))
show_first3_components(G, title="First 3 principal components for G", start_at_index=1)
def gen_matrix_A_star(ncomp=25):
"""
matrix A* of principal components for the article representation
- generates the PCA form of matrix U
- adds the OBJID value on the first column
:param ncomp:
:return:
"""
global pca_a, A_star
print("\n[x] PCA for matrix A:")
pca_a = PCA(n_components=ncomp)
A_star = pca_a.fit_transform(A.fillna(A.mean()).values[:, 1:])
A_star = np.append(A['paper_id'].values.reshape(A_star.shape[0], 1), A_star, axis=1)
print("[x] Explained variance ratio:")
print(pca_a.explained_variance_ratio_)
print("[x] Singular values:")
print(pca_a.singular_values_)
print("[x] Sum of variance:")
print(np.sum(pca_a.explained_variance_ratio_))
show_first3_components(A_star, title="First 3 principal components for A_star", start_at_index=1)
def get_indexes_articles_in_df(objid):
"""
MAP contains the mapping between astronomical object ids and the paper ids
returns the indexes in matrix A of object with objid
:param objid:
:return:
"""
global A, MAP
res = []
for paper_id in MAP[objid]:
record = A[A.paper_id == paper_id].index.values.tolist()
if len(record) != 0:
res.append(record[0])
else:
# ignoring for the moment if a paper id couldn't be found
# (probably there was an exception at download phase)
pass
return res
def gen_matrix_M(balance_factor=3):
"""
- construct matrix M by combining values from G and A_star
    - since brute-force pairing would take too much time and lead to an overly unbalanced training set,
      the matrix is built with a negative-to-positive ratio of 3 (balance_factor):
    - one portion of the data is "as is": object data in G paired with the papers in A_star it is associated with (via MAP)
    - another portion (balance_factor times larger) is "simulated" and pairs objects with articles that are not associated
- target value is set to 1 if association is given, otherwise 0
:param balance_factor:
:return:
"""
global G, U, A_star, A
M = []
y = []
print("Building matrix M, this will take a while .. ")
for i in range(0, G.shape[0]):
if i != 0 and i % int(0.1 * G.shape[0]) == 0:
print("%.2f" % (100 * i / G.shape[0]) + "% of objects")
r1 = G[i, 1:].tolist()
object_id = U.values[i, 0]
indexes_associations = get_indexes_articles_in_df(object_id)
indexes_non_associations = list(filter(lambda k: k not in indexes_associations, range(A.shape[0])))
indexes_non_associations = pd.Series(indexes_non_associations).sample(
len(indexes_associations) * balance_factor).tolist()
for j in indexes_associations + indexes_non_associations:
r2 = A_star[j, 1:].tolist()
M.append(r1 + r2)
y.append(1 if j in indexes_associations else 0)
M = np.array(M)
return M, y
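# --- Editor's illustrative sketch (not part of the original pipeline) ---------------------
# The docstring of gen_matrix_M above describes building each row of M as "| ui | aj | target |",
# sampling `balance_factor` times more non-associated pairs (target=0) than associated ones
# (target=1). The toy helper below mirrors only that sampling idea; `toy_G`, `toy_A_star` and
# the association table are made-up assumptions chosen purely for clarity, and the function
# is never called by the pipeline.
def _demo_balanced_pairing(balance_factor=3, seed=0):
    """Toy demonstration of the balanced object/article pairing used in gen_matrix_M."""
    import numpy as np  # already imported at module level; repeated so the sketch stands alone
    rng = np.random.default_rng(seed)
    toy_G = rng.normal(size=(4, 3))        # 4 objects with 3 latent features each
    toy_A_star = rng.normal(size=(10, 2))  # 10 articles with 2 latent features each
    associations = {i: [2 * i, 2 * i + 1] for i in range(4)}  # pretend object i cites articles 2i, 2i+1
    rows, targets = [], []
    for i in range(toy_G.shape[0]):
        pos = associations[i]
        neg_pool = [j for j in range(toy_A_star.shape[0]) if j not in pos]
        neg = list(rng.choice(neg_pool, size=len(pos) * balance_factor, replace=False))
        for j in pos + neg:
            rows.append(np.concatenate([toy_G[i], toy_A_star[j]]))  # | ui | aj |
            targets.append(1 if j in pos else 0)                    # target
    return np.array(rows), targets
# ------------------------------------------------------------------------------------------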
def gen_matrix_Mi(i):
"""
Generates matrix Mi, that is the portion of Matrix M given an astronomical object id OBJID found at index i in G
    This is done by taking the object's record from G and combining it with all records from A_star,
    so that the probability P(Association | Gi, A_star) is calculated for every paper in A_star
:param i:
:return:
"""
global U, G, A, A_star
Mi = []
yi = []
r1 = G[i, 1:].tolist()
for j in range(0, A_star.shape[0]):
object_id = U.values[i, 0].encode("utf-8")
articles_found_related = dext.objects_articles_dict[object_id]
r2 = A_star[j, 1:].tolist()
article_id = A.values[j, 0]
target_value = int(article_id in articles_found_related)
Mi.append(
r1 + r2
)
yi.append(target_value)
Mi = np.array(Mi)
return Mi, yi
def get_confusion_matrix_stats(cm, i):
"""
Given a Confusion Matrix cm, calculates precision, recall and F1 scores
:param cm: confusion matrix
    :param i: index of the class for which the statistics are calculated
:return: three statistics: precision, recall and the F1-Score
"""
tp = cm[i, i]
fp = np.sum(cm[i, :]) - tp
fn = np.sum(cm[:, i]) - tp
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1_score = 2 * (precision * recall) / (precision + recall)
return precision, recall, f1_score
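# --- Editor's worked example (the 2x2 matrix below is made up) ----------------------------
# get_confusion_matrix_stats takes tp = cm[i, i], counts the rest of row i as false positives
# and the rest of column i as false negatives. For cm = [[50, 10], [5, 35]] and i = 0 this
# gives tp=50, fp=10, fn=5, so precision = 50/60 ~ 0.83, recall = 50/55 ~ 0.91 and F1 ~ 0.87.
# The helper is only an illustration and is not called anywhere in this module.
def _demo_confusion_stats():
    """Tiny sanity check of get_confusion_matrix_stats on a hand-made 2x2 matrix."""
    import numpy as np  # repeated locally so the sketch stands alone
    cm = np.array([[50, 10], [5, 35]])
    return get_confusion_matrix_stats(cm, 0)  # -> (~0.83, ~0.91, ~0.87)
# ------------------------------------------------------------------------------------------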
def check_mlp(x, y):
global CLF_MLP
print("+++++++++++++++++++++++++++++++++++++")
labels_zuordnung_mlp = CLF_MLP.classes_
beispiel_mlp_x = x
beispiel_mlp_y = y
y_true = np.array(beispiel_mlp_y)
y_pred = np.array([labels_zuordnung_mlp[np.argmax(t)] for t in CLF_MLP.predict_proba(beispiel_mlp_x)])
accuracy = (y_pred == y_true).mean()
cm = confusion_matrix(y_true, y_pred, labels=labels_zuordnung_mlp)
if True:
print("Labels:", labels_zuordnung_mlp)
print("Confusion Matrix:")
print(cm)
for i in range(0, len(cm)):
precision, recall, f1_score = get_confusion_matrix_stats(cm, i)
print("Label {} - precision {}, recall {}, f1_score {}: ".format(
i, np.round(precision, 2), np.round(recall, 2), np.round(f1_score, 2)
))
print("precision:", accuracy)
print("+++++++++++++++++++++++++++++++++++++")
def show_object_details(object_id, article_indexes, pred_df=None, topk=10):
"""
Shows associated papers for an object id according to predicted article_indexes
# U expands categorical variables, so it has a dimension larger than dext.objects_df
:param object_id:
:param article_indexes:
:param pred_df:
:param topk:
:return:
"""
global A
print("""
\nObject with ID: {}
""".format(object_id))
if pred_df is not None:
print("[x] Predicted articles in pred_df:")
print(pred_df)
objid = object_id.encode("utf-8")
url = "http://skyserver.sdss.org/dr16/en/tools/explore/Summary.aspx?id={}".format(
object_id
)
print("[x] You can check the SkyServer Explore page at: ")
print(url, "\n")
print("[x] Compact form from original object pandas dataframe (objects_df as in data_extract.py):")
print(dext.objects_df[dext.objects_df.OBJID == objid].transpose())
print("\n[x] Showing maximum Top-{}:".format(topk))
for k in range(0, min(len(article_indexes), topk)):
print("*************************************************************************************")
if pred_df is not None:
print(pred_df.iloc[k])
j = article_indexes[k]
dext.get_paper(paper_id=A.paper_id.iloc[j])
input(".....")
def apply_mlp(object_id=None):
"""
uses trained MLP classifier to calculate probability P(Bij | ui, aj) for one object_id ui and all aj
- uses construction of matrix Mi to achieve that, that is the portion of general matrix M for the object
:param object_id:
:return:
"""
global U, G, CLF_MLP
if object_id is None:
i = pd.Series(range(0, G.shape[0])).sample(10).iloc[5] # index of object id in matrices G, U
object_id = U.OBJID.iloc[i]
else:
i = U[U.OBJID == object_id].index.values.tolist()[-1]
print("\n[x] Object ID:", object_id)
Mi, yi = gen_matrix_Mi(i)
Mi = pd.DataFrame(Mi)
print("[x] The portion of M matrix, corresponding to | ui | aj |, with j in [0, A_star.shape[0]]: ")
print(Mi)
preds = [np.round(t[1], 2) for t in CLF_MLP.predict_proba(Mi.values)]
# print("\n[x] Predictions:")
# print(preds)
pred_df = pd.DataFrame(
{
"article_index": Mi.index.values.tolist(),
"mlp_proba": preds,
"associated": yi
}
)
pred_df = pred_df.sort_values(by="mlp_proba", ascending=False)
pred_df = pred_df[pred_df.mlp_proba > 0.5]
pred_df = pred_df.reset_index(drop=True)
print("\n[x] Summarised with a threshold for probabilty of 50%, that is P(Bij | ui, aj) > 0.5:")
print(pred_df)
articles_indexes = pred_df.article_index.values.tolist()
print("")
return object_id, articles_indexes, pred_df
def data_extraction():
"""
    the original data is accessible through the dext module: papers_total, objects_articles_dict, objects_df
:return:
"""
print("[x] Extracting data and creating matrices A, U and dictionary map MAP .. ")
dext.run()
A, U, MAP, txtclf = dext.load_matrices()
return A, U, MAP, txtclf
####################### Constructing Matrix M and MLP model #######################
def construct_G_Astar_M_matrices():
"""
uses above methods to construct training data M by combining G and A_star matrices
:return:
"""
global G, A_star, M, pca_A_ncomponents, pca_G_ncomponents
print("[x] Generating PCA projections of:"
"\n- matrices U (matrix G of astronomical objects)"
"\n- and A (matrix A_star of related papers)")
gen_matrix_G(ncomp=pca_G_ncomponents)
# TODO: increase automatically pca_A_ncomponents if the explained variance drops to less than, for instance, 0.85
gen_matrix_A_star(ncomp=pca_A_ncomponents)
print("\n[x] Generating matrix M out of two parts "
"| ui | aj | target {1 if related, 0 otherwise} ")
M, y = gen_matrix_M()
M = | pd.DataFrame(M) | pandas.DataFrame |
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import extract
from sqlalchemy import and_
from sqlalchemy import or_
from mpl_toolkits.basemap import Basemap
from flask import Flask, jsonify
engine = create_engine("sqlite:////Users/cla/Desktop/UM Data Science/Homework/10 -sqlalchemy-challenge/Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
dates = []
start_date = input(f'Start date of your trip(yyyy-mm-dd)')
end_date = input(f'End date of your trip(yyyy-mm-dd)')
for date in start_date, end_date:
split_date=date.split('-')
dates.append(split_date)
start,end = dates
start_year=(start[0]); start_month=(start[1]); start_day=(start[2])
end_year=(end[0]); end_month=(end[1]); end_day=(end[2])
app = Flask(__name__)
@app.route('/')
def home():
print(f'the possible routes are: ')
return 'Welcome to my homepage'
@app.route('/api/v1.0/precipitation')
def precipitation():
    query_date = '2015-12-12'  # date string; the bare expression 2015-12-12 would evaluate to the integer 1991
prcp = session.query(Measurement.date,Measurement.station,Measurement.prcp).filter(Measurement.date>=query_date).order_by(Measurement.date).all()
return jsonify(prcp)
@app.route('/api/v1.0/stations')
def stations():
station_names = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()
station_names = pd.DataFrame(station_names)
    return jsonify(station_names.to_dict(orient='records'))  # a DataFrame itself is not JSON serializable
@app.route('/api/v1.0/tobs')
def tobs():
last_date=session.query(Measurement.date).\
order_by(Measurement.date.desc()).first()
for date in last_date:
split_last_date=date.split('-')
last_year=int(split_last_date[0]); last_month=int(split_last_date[1]); last_day=int(split_last_date[2])
query_date = dt.date(last_year, last_month, last_day) - dt.timedelta(days=365)
    last_year_tobs = session.query(Measurement.date, Measurement.station, Measurement.tobs).\
filter(Measurement.date>=query_date).\
order_by(Measurement.date).all()
last_year_tobs_df = | pd.DataFrame(last_year_tobs) | pandas.DataFrame |
"""
็ปK็บฟๆไปถ๏ผๅๅบ็ญ็ฅไนฐๅ
ฅๅๅบ่็นใ
"""
import os
import sys
import time
import threading
from multiprocessing import Pool, RLock, freeze_support
import numpy as np
import pandas as pd
from tqdm import tqdm
from rich import print as print
import CeLue  # personal strategy module, not shared
import func_TDX
import user_config as ucfg
from pyecharts.charts import Kline, Bar, Grid
from pyecharts.globals import ThemeType
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
def markareadata(df_stock):
    # build the coordinate markers for the buy-point / sell-point areas
    df_celue = df_stock.loc[df_stock['celue_buy'] | df_stock['celue_sell']]  # keep only the bars with buy or sell signals
yAxis_max = df_stock['high'].max()
markareadata = []
temp = []
    # k is the range index (which bar of the chart), v is the content of that K-line row (dict-like)
for k, v in df_celue.iterrows():
temp.append(
{
"xAxis": k,
# "yAxis": yAxis_max if v['celue_sell'] else 0, # buy็นๆฏ0๏ผsell็นๆฏๆๅคงๅผ ๅกซไบyๅๆ ไผๅฏผ่ดๅพๅฝขๆพๅคงๅๅบๅๆถๅคฑ
}
)
        # once temp holds 2 entries, both the start and end coordinates are known: append to markareadata, then clear temp and start over
if len(temp) == 2:
            # add an 'itemStyle': {'color': ...} entry to the second coordinate dict.
            # df_celue.at[temp[1]['xAxis'], 'close'] reads the close price at that index.
            # compare the close at the second coordinate with the close at the first:
            # higher -> the area is red (profit), lower -> green (loss)
temp[1]["itemStyle"] = {'color': "#ef232a" if df_celue.at[temp[1]['xAxis'], 'close'] > df_celue.at[
temp[0]['xAxis'], 'close'] else "#14b143"}
markareadata.append(temp)
# rprint(markareadata)
temp = []
return markareadata
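# --- Editor's note: example of the structure markareadata returns (values are made up) ----
# Each element is a [start, end] pair of dicts consumed by pyecharts' mark-area option: the
# first dict holds the x index of the buy bar, the second the x index of the sell bar plus an
# itemStyle whose color encodes whether the trade closed higher (#ef232a, red) or lower
# (#14b143, green), matching the colors assigned in the loop above.
_EXAMPLE_MARKAREA = [
    [{"xAxis": 12}, {"xAxis": 25, "itemStyle": {"color": "#ef232a"}}],  # closed higher
    [{"xAxis": 40}, {"xAxis": 47, "itemStyle": {"color": "#14b143"}}],  # closed lower
]
# ------------------------------------------------------------------------------------------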
def marklinedata(df_stock):
    # build the trend-line data
import math
from func_TDX import SMA, BARSLASTCOUNT
"""
    Matches the effect of the following TongDaXin (TDX) indicator formula exactly:
็ฐไปท:CONST(C),COLORLIGRAY,DOTLINE;
MAA10:=MA(CLOSE,55);
้ซ็ช:=BARSLASTCOUNT(L>MAA10)=9;
ไฝ็ช:=BARSLASTCOUNT(H<MAA10)=9;
้ซ็ช็ ด:=้ซ็ช ;
ไฝ็ช็ ด:=ไฝ็ช ;
่ทไธๆฌก้ซไฝ็ฝฎ:=BARSLAST(้ซ็ช็ ด),NODRAW;
่ทไธๆฌกไฝไฝ็ฝฎ:=BARSLAST(ไฝ็ช็ ด),NODRAW;
้ซ่ฟๆปค:=(้ซ็ช็ ด AND REF(่ทไธๆฌก้ซไฝ็ฝฎ,1)>REF(่ทไธๆฌกไฝไฝ็ฝฎ,1));
ไฝ่ฟๆปค:=(ไฝ็ช็ ด AND REF(่ทไธๆฌกไฝไฝ็ฝฎ,1)>REF(่ทไธๆฌก้ซไฝ็ฝฎ,1));
้ซ0:=BACKSET(้ซ่ฟๆปค,10);
ไฝ0:=BACKSET(ไฝ่ฟๆปค,10);
้ซ1:=CROSS(้ซ0,0.5);
ไฝ1:=CROSS(ไฝ0,0.5);
่ทไธ้ซไฝ:=BARSLAST(้ซ1),NODRAW;
่ทไธไฝไฝ:=BARSLAST(ไฝ1),NODRAW;
ไฝ็น:=IF(่ทไธ้ซไฝ > ่ทไธไฝไฝ, LLV(L,่ทไธไฝไฝ+1)=L,0);
ไฝ:=FILTERX(ไฝ็น AND ่ทไธ้ซไฝ>่ทไธไฝไฝ,่ทไธไฝไฝ+1);
้ซ็น:=IF(่ทไธ้ซไฝ < ่ทไธไฝไฝ, HHV(H,่ทไธ้ซไฝ+1)=H,0);
้ซ:=FILTERX(้ซ็น AND ่ทไธไฝไฝ>่ทไธ้ซไฝ ,่ทไธ้ซไฝ+1);
NOTEXTไธๆถจ็บฟ:DRAWLINE(ไฝ AND BARSLAST(้ซ)>20,L,้ซ AND BARSLAST(ไฝ)>20,H,0),COLORRED,LINETHICK2;
NOTEXTไธ่ท็บฟ:DRAWLINE(้ซ AND BARSLAST(ไฝ)>20,H,ไฝ AND BARSLAST(้ซ)>20,L,0),COLORGREEN,LINETHICK2;
"""
    df_stock['date'] = pd.to_datetime(df_stock['date'], format='%Y-%m-%d')  # convert to datetime
    df_stock.set_index('date', drop=False, inplace=True)  # index by date to make alignment/merging with other DataFrames easier
H = df_stock['high']
L = df_stock['low']
C = df_stock['close']
TJ04_ๅ็บฟ = SMA(C, 55)
TJ04_้ซ็ช็ ด = BARSLASTCOUNT(L > TJ04_ๅ็บฟ) == 9
TJ04_ไฝ็ช็ ด = BARSLASTCOUNT(H < TJ04_ๅ็บฟ) == 9
TJ04_้ซ็ช็ ด = pd.DataFrame(TJ04_้ซ็ช็ ด.loc[TJ04_้ซ็ช็ ด == True], columns=["้ซ็ช็ ด"])
TJ04_ไฝ็ช็ ด = pd.DataFrame(TJ04_ไฝ็ช็ ด.loc[TJ04_ไฝ็ช็ ด == True], columns=["ไฝ็ช็ ด"])
TJ04_่ฟๆปค = pd.concat([TJ04_้ซ็ช็ ด, TJ04_ไฝ็ช็ ด]).fillna(value=False).sort_index()
del TJ04_ๅ็บฟ, TJ04_้ซ็ช็ ด, TJ04_ไฝ็ช็ ด
้ซ, ไฝ = 0, 0
    # filter the high/low breakout signals: walking the dates from old to new, keep them strictly alternating and drop consecutive signals of the same type
for index, row in TJ04_่ฟๆปค[:].iterrows():
if row['้ซ็ช็ ด'] and ้ซ == 1:
TJ04_่ฟๆปค.drop(index=index, inplace=True)
elif row['ไฝ็ช็ ด'] and ไฝ == 1:
TJ04_่ฟๆปค.drop(index=index, inplace=True)
elif row['้ซ็ช็ ด'] and ้ซ == 0:
้ซ = 1
ไฝ = 0
elif row['ไฝ็ช็ ด'] and ไฝ == 0:
้ซ = 0
ไฝ = 1
    # locate the swing high/low point of each segment
TJ04_่ฟๆปค.reset_index(drop=False, inplace=True)
TJ04_้ซไฝ็น = pd.DataFrame()
last_day = None
for index, row in TJ04_่ฟๆปค.iterrows():
if index == 0:
last_day = row['date']
continue
elif row['้ซ็ช็ ด']:
            s_date = last_day  # start of the date interval
            e_date = row['date']  # end of the date interval
            low_date = L.loc[s_date:e_date].idxmin()  # date of the interval low
            low_value = L.loc[s_date:e_date].min()  # value of the interval low
last_day = low_date
df_temp = pd.Series(data={'ไฝ็นไปทๆ ผ': low_value,
'ไฝ็นๆฅๆ': low_date,
},
name=index,
)
elif row['ไฝ็ช็ ด']:
            s_date = last_day  # start of the date interval
            e_date = row['date']  # end of the date interval
            high_date = H.loc[s_date:e_date].idxmax()  # date of the interval high
            high_value = H.loc[s_date:e_date].max()  # value of the interval high
last_day = high_date
df_temp = pd.Series(data={'้ซ็นไปทๆ ผ': high_value,
'้ซ็นๆฅๆ': high_date,
},
name=index,
)
TJ04_้ซไฝ็น = TJ04_้ซไฝ็น.append(df_temp)
TJ04_้ซไฝ็น.reset_index(drop=True, inplace=True)
    # convert to the data format pyecharts expects
marklinedata = []
temp = []
"""
    The x coordinate is the integer position of the date, the y coordinate is the price.
    Required data format: [[{'xAxis': start x, 'yAxis': start y, 'value': line length}, {'xAxis': end x, 'yAxis': end y}],
                           [{'xAxis': start x, 'yAxis': start y, 'value': line length}, {'xAxis': end x, 'yAxis': end y}],
                          ]
"""
last_day, last_value = 0, 0
for index, row in TJ04_้ซไฝ็น.iterrows():
if index == 0:
if | pd.isna(row['ไฝ็นไปทๆ ผ']) | pandas.isna |
import numpy as np
import pandas as pd
import timeit
import resource
rsrc = resource.RLIMIT_DATA
limit = int(1e9)
resource.setrlimit(rsrc, (limit, limit))
import opt_einsum as oe
| pd.set_option('display.width', 200) | pandas.set_option |
import pandas as pd
from scipy import stats
import numpy as np
import math
import os
import sys
import json, csv
import itertools as it
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import scikit_posthocs
from statsmodels.sandbox.stats.multicomp import multipletests
from collections import OrderedDict
from sklearn.metrics import r2_score
from scipy.stats import distributions
from scipy.stats.stats import find_repeats
import warnings
def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
alternative="two-sided"):
"""
scipy stats function https://github.com/scipy/scipy/blob/v1.2.1/scipy/stats/morestats.py#L2709-L2806
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
Either the first set of measurements (in which case `y` is the second
set of measurements), or the differences between two sets of
measurements (in which case `y` is not to be specified.) Must be
one-dimensional.
y : array_like, optional
Either the second set of measurements (if `x` is the first set of
measurements), or not specified (if `x` is the differences between
two sets of measurements.) Must be one-dimensional.
zero_method : {'pratt', 'wilcox', 'zsplit'}, optional
The following options are available (default is 'wilcox'):
* 'pratt': Includes zero-differences in the ranking process,
but drops the ranks of the zeros, see [4]_, (more conservative).
* 'wilcox': Discards all zero-differences, the default.
* 'zsplit': Includes zero-differences in the ranking process and
split the zero rank between positive and negative ones.
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
alternative : {"two-sided", "greater", "less"}, optional
The alternative hypothesis to be tested, see Notes. Default is
"two-sided".
Returns
-------
statistic : float
If `alternative` is "two-sided", the sum of the ranks of the
differences above or below zero, whichever is smaller.
Otherwise the sum of the ranks of the differences above zero.
pvalue : float
The p-value for the test depending on `alternative`.
See Also
--------
kruskal, mannwhitneyu
Notes
-----
The test has been introduced in [4]_. Given n independent samples
(xi, yi) from a bivariate distribution (i.e. paired samples),
it computes the differences di = xi - yi. One assumption of the test
is that the differences are symmetric, see [2]_.
The two-sided test has the null hypothesis that the median of the
differences is zero against the alternative that it is different from
zero. The one-sided test has the null hypothesis that the median is
positive against the alternative that it is negative
(``alternative == 'less'``), or vice versa (``alternative == 'greater.'``).
The test uses a normal approximation to derive the p-value (if
``zero_method == 'pratt'``, the approximation is adjusted as in [5]_).
A typical rule is to require that n > 20 ([2]_, p. 383). For smaller n,
exact tables can be used to find critical values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
.. [2] <NAME>., Practical Nonparametric Statistics, 1971.
.. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
Rank Procedures, Journal of the American Statistical Association,
Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
.. [4] <NAME>., Individual Comparisons by Ranking Methods,
Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
.. [5] <NAME>., The Normal Approximation to the Signed-Rank
Sampling Distribution When Zero Differences are Present,
Journal of the American Statistical Association, Vol. 62, 1967,
pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`
Examples
--------
In [4]_, the differences in height between cross- and self-fertilized
corn plants is given as follows:
>>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]
    Cross-fertilized plants appear to be higher. To test the null
hypothesis that there is no height difference, we can apply the
two-sided test:
>>> from scipy.stats import wilcoxon
>>> w, p = wilcoxon(d)
>>> w, p
(24.0, 0.04088813291185591)
Hence, we would reject the null hypothesis at a confidence level of 5%,
concluding that there is a difference in height between the groups.
To confirm that the median of the differences can be assumed to be
positive, we use:
>>> w, p = wilcoxon(d, alternative='greater')
>>> w, p
(96.0, 0.020444066455927955)
This shows that the null hypothesis that the median is negative can be
rejected at a confidence level of 5% in favor of the alternative that
the median is greater than zero. The p-value based on the approximation
is within the range of 0.019 and 0.054 given in [2]_.
Note that the statistic changed to 96 in the one-sided case (the sum
of ranks of positive differences) whereas it is 24 in the two-sided
case (the minimum of sum of ranks above and below zero).
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if alternative not in ["two-sided", "less", "greater"]:
raise ValueError("Alternative must be either 'two-sided', "
"'greater' or 'less'")
if y is None:
d = np.asarray(x)
if d.ndim > 1:
raise ValueError('Sample x must be one-dimensional.')
else:
x, y = map(np.asarray, (x, y))
if x.ndim > 1 or y.ndim > 1:
raise ValueError('Samples x and y must be one-dimensional.')
if len(x) != len(y):
raise ValueError('The samples x and y must have the same length.')
d = x - y
if zero_method in ["wilcox", "pratt"]:
n_zero = np.sum(d == 0, axis=0)
if n_zero == len(d):
raise ValueError("zero_method 'wilcox' and 'pratt' do not work if "
"the x - y is zero for all elements.")
if zero_method == "wilcox":
# Keep all non-zero differences
d = np.compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
# return min for two-sided test, but r_plus for one-sided test
# the literature is not consistent here
# r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
# i.e. the sum of the ranks, so r_minus and the min can be inferred
# (If alternative='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
# [3] uses the r_plus for the one-sided test, keep min for two-sided test
# to keep backwards compatibility
if alternative == "two-sided":
T = min(r_plus, r_minus)
else:
T = r_plus
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
# normal approximation needs to be adjusted, see Cureton (1967)
mn -= n_zero * (n_zero + 1.) * 0.25
se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = np.sqrt(se / 24)
# apply continuity correction if applicable
d = 0
if correction:
if alternative == "two-sided":
d = 0.5 * np.sign(T - mn)
elif alternative == "less":
d = -0.5
else:
d = 0.5
# compute statistic and p-value using normal approximation
z = (T - mn - d) / se
if alternative == "two-sided":
prob = 2. * distributions.norm.sf(abs(z))
elif alternative == "greater":
# large T = r_plus indicates x is greater than y; i.e.
# accept alternative in that case and return small p-value (sf)
prob = distributions.norm.sf(z)
else:
prob = distributions.norm.cdf(z)
return T, prob, z
def get_effect_size_text(effect_size):
if effect_size == None:
effect_name = "unknown"
elif 0.1 <= effect_size < 0.25:
effect_name = "uphill weak"
elif 0.25 <= effect_size < 0.4:
effect_name = "uphill moderate"
elif effect_size >= 0.4:
effect_name = "uphill strong"
elif -0.1 >= effect_size > -0.25:
effect_name = "downhill weak"
elif -0.25 >= effect_size > -0.4:
effect_name = "downhill moderate"
elif effect_size <= -0.4:
effect_name = "downhill strong"
else:
effect_name = "unsure"
return effect_name
def get_p_value_stars(p_value):
if p_value <= 0.01:
return "***"
elif p_value <= 0.05:
return "**"
elif p_value <= 0.1:
return "*"
else:
return ""
def get_result_sent(test_name, feature_name, corpus_name, p_value, n_complex, avg_complex, sd_complex, n_simple, avg_simple, sd_simple, df, t_value, effect_size, p_threshold=0.05, only_relevant=False):
effect_name = get_effect_size_text(effect_size)
if 0 <= p_value <= p_threshold:
is_significant = "a"
p_value_text = "p<="+str(p_threshold)
else:
is_significant = "no"
p_value_text = "p>"+str(p_threshold)
if test_name == "No test" or effect_size == None:
return "The average of {} for complex sentences is {} (SD={}, n={}) and for simple sentences {} (SD={}).".format(feature_name, round(avg_complex,2), round(sd_complex, 2), n_complex, round(avg_simple, 2), round(sd_simple, 2))
if only_relevant:
if p_value > p_threshold or effect_size == None or effect_size < 0.1:
return None
return "A {} was conducted to compare {} in the {} corpus. " \
"There is {} significant ({}) difference in the scores for complex (n={}, M={}, SD={}) and " \
"simplified (n={}, M={}, SD={}) sentences, t({})={}. " \
"These results that the simplification level has a {} effect (r={}) on {}.\n".format(test_name, feature_name,
corpus_name, is_significant,
p_value_text, n_complex, round(avg_complex,2),
round(sd_complex,2), n_simple, round(avg_simple,2),
round(sd_simple,2), df, round(t_value,2),
effect_name, round(effect_size,2),
feature_name)
def get_variable_names(col_names, feat_dict_path="feature_dict_checked.json", comparable=False, paired=True, difference=False):
if comparable:
return sorted(list(set(["_".join(col.split("_")[:-1]) for col in col_names if col.endswith("_complex") or col.endswith("_simple")])))
elif paired:
return sorted([col for col in col_names if col.endswith("_paired")])
elif difference:
return sorted([col for col in col_names if col.endswith("_diff")])
else:
return sorted(list(col_names))
def add_difference_features(input_data):
comparable_names = get_variable_names(input_data.columns.values, comparable=True, paired=False)
for feat in comparable_names:
input_data[feat+"_diff"] = input_data[feat+"_complex"].astype(np.float) - input_data[feat+"_simple"].astype(np.float)
return input_data
def change_dtype(input_data, col_names, comparable=True):
if comparable:
old_names = col_names
col_names = list()
for col in old_names:
col_names.append(col+"_complex")
col_names.append(col+"_simple")
# do_statistics.py:409: DtypeWarning: Columns (54,55,56,60,61,62) have mixed types. Specify dtype option on import or set low_memory=False.
# en newsela 2015
input_data.replace(False, 0, inplace=True)
input_data.replace("False", 0, inplace=True)
input_data.replace(True, 1, inplace=True)
input_data.replace("True", 1, inplace=True)
input_data[col_names] = input_data[col_names].apply(pd.to_numeric)
return input_data
def test_distribution_null_hypothesis(complex_values, simple_values, independent, feat_name, dict_path="feature_dict_checked.json"):
complex_values = complex_values[complex_values.notnull()]
simple_values = simple_values[simple_values.notnull()]
# todo: remove if all values 0 or nan
if len(complex_values) == 0 or len(simple_values) == 0 or \
(complex_values == 0).sum() == len(complex_values) or \
(simple_values == 0).sum() == len(simple_values) or \
list(complex_values) == list(simple_values):
return ("0", 0, 0, None)
# # 0: nominal, 1: ordinal, 2: interval, 3: ratio
# scale_of_measurement = check_scale(complex_values)
scale_of_measurement = check_scale_from_dict(dict_path, "comparable", feat_name)
normal_distribution = check_distribution([complex_values, simple_values], p_threshold=0.05)
variance_homogeneity = check_variance_homogeneity([complex_values, simple_values], p_threshold=0.05)
if scale_of_measurement >= 2 and normal_distribution and variance_homogeneity and independent:
t_value, p_value = stats.ttest_ind(complex_values, simple_values, equal_var=True)
        effect_size = abs(math.sqrt(t_value ** 2 / (t_value ** 2 + min(len(complex_values), len(simple_values)) - 1)))  # df = n - 1 with n the smaller sample size
return ("Student's t-test", t_value, p_value, effect_size)
elif scale_of_measurement >= 2 and normal_distribution and not variance_homogeneity and independent:
t_value, p_value = stats.ttest_ind(complex_values, simple_values, equal_var=False)
        effect_size = abs(math.sqrt(t_value ** 2 / (t_value ** 2 + min(len(complex_values), len(simple_values)) - 1)))  # df = n - 1 with n the smaller sample size
return ("Welch's t-test", t_value, p_value, effect_size)
elif scale_of_measurement >= 1 and independent:
t_value, p_value = stats.mannwhitneyu(complex_values, simple_values)
#effect_size = get_effect_size(t_value, min(len(complex_values), len(simple_values)))
return ("MannโWhitney U test", t_value, p_value, None)
elif scale_of_measurement >= 2 and normal_distribution and variance_homogeneity and not independent:
t_value, p_value = stats.ttest_rel(complex_values, simple_values)
# effect_size = abs(math.sqrt(t_value**2/(t_value**2+min(complex_values, simple_values)-1)))
effect_size = stats.pearsonr(complex_values, simple_values)[0]
return ("Student's t-test", t_value, p_value, effect_size)
elif scale_of_measurement >= 1 and not independent:
if len(complex_values) != len(simple_values):
return ("No test", np.mean(complex_values), np.mean(simple_values), None)
t_value, p_value, z_value = wilcoxon(complex_values, simple_values)
effect_size = abs(z_value/math.sqrt(min(len(complex_values), len(simple_values))))
#effect_size = stats.pearsonr(complex_values, simple_values)[0]
return ("Wilcoxon signed-rank test", t_value, p_value, effect_size)
else:
# todo name only distribution of values?
return ("No test", np.mean(complex_values), np.mean(simple_values), None)
def posthoc_dunn_z(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] <NAME> (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] <NAME> (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn_z(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
#p_value = 2. * ss.norm.sf(np.abs(z_value))
return z_value
x, _val_col, _group_col = scikit_posthocs.__convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = pd.Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn_z(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return pd.DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def compare_languages(list_lang_results, feat_name, list_corpus_names, p_threshold=0.05, dict_path="feature_dict_checked.json"):
list_lang_no_nan = list()
corpus_names = OrderedDict()
for lang_values, corpus_name in zip(list_lang_results, list_corpus_names):
no_nans = lang_values[lang_values.notnull()]
if len(no_nans) > 0:
list_lang_no_nan.append(no_nans)
corpus_names[corpus_name] = len(no_nans)
if len(list_lang_no_nan) == 0:
return 0,0
# scale_of_measurement = check_scale(list_lang_no_nan[0])
scale_of_measurement = check_scale_from_dict(dict_path, "paired", feat_name)
# # 0: nominal, 1: ordinal, 2: interval, 3: ratio
normal_distribution = check_distribution(list_lang_no_nan, p_threshold=0.05)
variance_homogeneity = check_variance_homogeneity(list_lang_no_nan, p_threshold=0.05)
if scale_of_measurement >= 2 and normal_distribution and variance_homogeneity:
        # does the language affect the value of the feature? Do simplifications in each language work similarly?
t_value, p_value = stats.f_oneway(*list_lang_no_nan)
return ("ANOVA", p_value)
#if p_value <= p_threshold:
# posthoc: which langauges are different?
# stats.multicomp.pairwise_tukeyhsd
# if two different ones found, use pearson to get effect size
#effect_size = stats.pearsonr(complex_values, simple_values)[0]
# effec_size = cohend(complex_values, simple_values)
elif scale_of_measurement >= 1:
try:
h_statistic, p_value = stats.kruskal(*list_lang_no_nan)
except ValueError:
return 0,0
if 0 < p_value <= p_threshold:
if p_value <= 0.01:
p_value = "p<=.01"
elif p_value <= 0.05:
p_value = "p<=.05"
else:
p_value = "p>0.05"
output_list = list()
posthoc_frame = scikit_posthocs.posthoc_dunn(list_lang_no_nan, p_adjust="holm")
posthoc_frame_z = posthoc_dunn_z(list_lang_no_nan)
for i, name_corpus_col in zip(posthoc_frame.columns.values, corpus_names.keys()):
for n, name_corpus_row in zip(range(0, len(posthoc_frame)), corpus_names.keys()):
if p_threshold >= posthoc_frame.iloc[n][i] > 0:
effect_size = abs(posthoc_frame_z.iloc[n][i]/math.sqrt(corpus_names[name_corpus_col]+corpus_names[name_corpus_row]))
if effect_size >= 0.1:
output_list.append(["Kruskal ", p_value, "effectsize", str(round(effect_size, 4)),
"h", str(round(h_statistic, 4)), "z", str(round(posthoc_frame_z.iloc[n][i],4)), name_corpus_col, name_corpus_row])
#pos_col = list(corpus_names.keys()).index(name_corpus_col)
#pos_row = list(corpus_names.keys()).index(name_corpus_row)
#effect_size_pearson = stats.pearsonr(list_lang_no_nan[pos_col], list_lang_no_nan[pos_row])[0]
# print(len(list_lang_no_nan[pos_col]), len(list_lang_no_nan[pos_row]))
# effect_size_cohen = cohend(list_lang_no_nan[pos_col], list_lang_no_nan[pos_row])
return output_list
else:
return 0, 0
else:
return 0, 0
def cohend(d1, d2):
# code from here https://machinelearningmastery.com/effect-size-measures-in-python/
# calculate the size of samples
n1, n2 = len(d1), len(d2)
# calculate the variance of the samples
s1, s2 = np.var(d1, ddof=1), np.var(d2, ddof=1)
# calculate the pooled standard deviation
s = math.sqrt(((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2))
# calculate the means of the samples
u1, u2 = np.mean(d1), np.mean(d2)
# calculate the effect size
return (u1 - u2) / s
def get_descriptive_values(input_values):
input_values = input_values[input_values.notnull()]
return len(input_values), np.mean(input_values), np.std(input_values)
def get_effect_size(z_value, n):
return abs(z_value/math.sqrt(n))
def scale_value_to_text(value):
dict_scale = {0: "nominal", 1: "ordinal", 2: "interval", 3: "ratio"}
return dict_scale[value]
def check_scale(input_series):
# 0: nominal, 1: ordinal, 2: interval, 3: ratio
# enough to check one scale because both have equal values
if len(set(input_series).difference({0,1})) == 0: #input_series.all() in [0, 1]:
return 0
elif all(0 <= i <= 1 for i in input_series):
return 3
else:
return 1
# if len(values.difference({0,1})) <= 1:
# # including nan value
# return "nominal"
# else:
# return "interval"
def check_scale_from_dict(dict_path, comparable_or_paired, feat_name):
with open(dict_path) as f:
data = json.load(f)
if feat_name in data[comparable_or_paired].keys():
return data[comparable_or_paired][feat_name]["measurement_scale"]
else:
#print(feat_name, " no information in feature dict provided.")
return 1
def check_distribution(list_series, p_threshold=0.05):
    # Shapiro-Wilk: a significant p-value rejects normality, so every series must have p >= threshold
    for input_series in list_series:
        w, p_value = stats.shapiro(input_series)
        if p_value < p_threshold:
            # a single non-normal series is enough to reject normality for the whole set
            return False
    return True
def check_variance_homogeneity(list_values, p_threshold=0.05):
w, p_value = stats.levene(*list_values)
if p_value >= p_threshold:
# if significant then the values are heterogeneous, hence p_value must be greater or equal to threshold
return True
else:
return False
def strong_effect_bold(val):
# bold = 'bold' if not isinstance(val, str) and float(val) >= 0.5 else ''
# return 'font-weight: %s' % bold
if isinstance(val, str):
color = 'black'
elif float(val) >= 0.4:
color = "darkblue"
elif float(val) >= 0.25:
color = "darkgreen"
else:
color = "violet"
return 'color: %s' % color
def get_effect_stars(p_val, effect_size, p_threshold=0.05):
if p_val <= p_threshold:
if effect_size >= 0.4:
return "***"
elif effect_size >= 0.25:
return "**"
elif effect_size >= 0.1:
return "*"
else:
return ""
else:
return ""
def get_statistics(input_data, comparable_col_names, paired_col_names, corpus_name, output_file_text, output_file_descriptive_table, output_file_effect_table, p_threshold=0.05, key=""):
result_sents = list()
result_table = pd.DataFrame(columns=["feature", corpus_name])
columns_descr = pd.MultiIndex.from_tuples(
[("feature", ""), (corpus_name, "complex"), (corpus_name, "simple"),(corpus_name, "effect size")])
#columns_descr = pd.MultiIndex.from_tuples([("feature", ""), (corpus_name, "N"), (corpus_name, "AVG (SD) complex"), (corpus_name, "AVG (SD) simple"), ("effect_size", "")])
#[["feature", corpus_name], ["", "N", "AVG (SD) complex", "AVG (SD) simple"]])
descriptive_table = pd.DataFrame(columns=columns_descr)
columns_descr_paired = pd.MultiIndex.from_tuples([("feature", ""), (corpus_name, "N"), (corpus_name, "AVG paired"), (corpus_name, "SD paired")])
descriptive_table_paired = pd.DataFrame(columns=columns_descr_paired)
# print(input_data.describe())
# print(comparable_col_names)
for i, col in enumerate(comparable_col_names):
#if col in ["check_if_head_is_noun", "check_if_head_is_verb", "check_if_one_child_of_root_is_subject", "check_passive_voice",
# "count_characters", "count_sentences", "count_syllables_in_sentence", "get_average_length_NP",
# "get_average_length_VP", "get_avg_length_PP", "get_ratio_named_entities",
# "get_ratio_of_interjections", "get_ratio_of_particles", "get_ratio_of_symbols",
# "get_ratio_referential", "is_non_projective"]:
# continue
# print(col, corpus_name, len(input_data[input_data[col+"_complex"].notnull()]), len(input_data[input_data[col+"_simple"].notnull()]))
test_name, t_value, p_value, effect_size = test_distribution_null_hypothesis(input_data[col+"_complex"], input_data[col+"_simple"], False, col)
n_complex, avg_complex, sd_complex = get_descriptive_values(input_data[col+"_complex"])
n_simple, avg_simple, sd_simple = get_descriptive_values(input_data[col + "_simple"])
# print(col, test_name, t_value, p_value, effect_size, "complex", n_complex, avg_complex, sd_complex, "simple", n_simple, avg_simple, sd_simple)
result_sent = get_result_sent(test_name, col, corpus_name, p_value, n_complex, avg_complex, sd_complex, n_simple, avg_simple, sd_simple, min(n_complex, n_simple)-1, t_value, effect_size, p_threshold=0.05, only_relevant=True)
if result_sent:
result_sents.append(result_sent)
if effect_size == None:
effect_size = 0
if p_value > p_threshold or effect_size < 0.1:
result_table.loc[i] = [col, ""]
else:
result_table.loc[i] = [col, str(round(effect_size,2))+get_p_value_stars(p_value)]
descriptive_table.loc[i] = [col, str(round(avg_complex, 2))+"$\pm$"+str(round(sd_complex,2))+"", str(round(avg_simple, 2))+"$\pm$"+str(round(sd_simple,2))+"", get_effect_stars(p_value, effect_size, p_threshold=0.05)]
descriptive_table.loc[i+1] = ["N", "", n_complex, ""]
for n, col in enumerate(paired_col_names):
n_paired, avg_paired, sd_paired = get_descriptive_values(input_data[col])
# print(col, test_name, t_value, p_value, effect_size, "complex", n_complex, avg_complex, sd_complex, "simple", n_simple, avg_simple, sd_simple)
descriptive_table_paired.loc[n] = [col, n_paired,
round(avg_paired, 2), "$\pm$" + str(round(sd_paired, 2))]
if output_file_text:
with open(output_file_text, "w+") as f:
f.writelines(result_sents)
with open(output_file_effect_table, "w+") as f:
f.write(result_table.to_latex(index=False, escape=False)+"\n\n")
result_table.set_index("feature")
# result_table_excel = result_table.style.applymap(strong_effect_bold)
# result_table_excel.to_excel(corpus_name+'styled.xlsx', engine='openpyxl')
# if output_file_table:
with open(output_file_descriptive_table, "w+") as f:
f.write(descriptive_table.to_latex(index=False, escape=False))
return input_data, descriptive_table, result_table, descriptive_table_paired
def save_results(concat_descr, concat_effect, concat_descr_paired, output_descr_paired, type_value=""):
type_value_dir = ""
if type_value:
if not os.path.exists("data/results/"+type_value):
os.makedirs("data/results/"+type_value)
type_value_dir = type_value+"/"
type_value = "_"+type_value
with open("data/results/"+type_value_dir+"all_descr_results"+type_value+".txt", "w") as f:
f.write(concat_descr.to_latex(index=False, escape=False))
with open("data/results/"+type_value_dir+"all_descr_results"+type_value+".csv", "w") as f:
f.write(concat_descr.to_csv(index=False))
with open("data/results/"+type_value_dir+"all_effect_results"+type_value+".txt", "w") as f:
f.write(concat_effect.to_latex(index=False, escape=False))
with open("data/results/"+type_value_dir+"all_effect_results"+type_value+".csv", "w") as f:
f.write(concat_effect.to_csv(index=False))
with open("data/results/"+type_value_dir+"all_descr_paired_results"+type_value+".txt", "w") as f:
f.write(concat_descr_paired.to_latex(index=False, escape=False))
with open("data/results/"+type_value_dir+"all_descr_paired_results.csv", "w") as f:
f.write(concat_descr_paired.to_csv(index=False))
with open("data/results/"+type_value_dir+"all_effect_paired_results"+type_value+".txt", "w") as f:
f.write(output_descr_paired)
return 1
def get_feature_dict(result_files):
list_lang_input = list()
for input_file in result_files:
input_data = pd.read_csv("data/ALL/"+input_file, sep="\t", header=0, warn_bad_lines=True, error_bad_lines=False)
# input_data = add_difference_features(input_data)
list_lang_input.append(input_data)
feature_dict = {"paired": {}, "comparable": {}}
for input_data in list_lang_input:
for feat in get_variable_names(input_data.columns.values, paired=True, comparable=False):
if feat not in feature_dict["paired"].keys():
feature_dict["paired"][feat] = {"description": "", "measurement_scale": check_scale(input_data[feat]),
"measurement_scale_text": scale_value_to_text(check_scale(input_data[feat])),
"min": min(input_data[feat]), "max": max(input_data[feat]),
"type": ""}
else:
if min(input_data[feat]) < feature_dict["paired"][feat]["min"]:
feature_dict["paired"][feat]["min"] = min(input_data[feat])
if max(input_data[feat]) > feature_dict["paired"][feat]["max"]:
feature_dict["paired"][feat]["max"] = max(input_data[feat])
if feature_dict["paired"][feat]["measurement_scale"] < check_scale(input_data[feat]) < 3:
feature_dict["paired"][feat]["measurement_scale"] = check_scale(input_data[feat])
feature_dict["paired"][feat]["measurement_scale_text"] = scale_value_to_text(feature_dict["paired"][feat]["measurement_scale"])
for feat in get_variable_names(input_data.columns.values, paired=False, comparable=True):
if feat not in feature_dict["comparable"].keys():
feature_dict["comparable"][feat] = {"description": "",
"measurement_scale_text": scale_value_to_text(max(check_scale(input_data[feat + "_complex"]), check_scale(input_data[feat + "_simple"]))),
"measurement_scale": max(check_scale(input_data[feat + "_complex"]), check_scale(input_data[feat + "_simple"])),
"min": min(min(input_data[feat+"_complex"]), min(input_data[feat+"_simple"])),
"max": max(max(input_data[feat+"_complex"]), max(input_data[feat+"_simple"])),
"type": ""}
else:
if min(input_data[feat+"_complex"]) < feature_dict["comparable"][feat]["min"]:
feature_dict["comparable"][feat]["min"] = min(input_data[feat+"_complex"])
if min(input_data[feat+"_simple"]) < feature_dict["comparable"][feat]["min"]:
feature_dict["comparable"][feat]["min"] = min(input_data[feat+"_simple"])
if max(input_data[feat+"_complex"]) > feature_dict["comparable"][feat]["max"]:
feature_dict["comparable"][feat]["max"] = max(input_data[feat+"_complex"])
if max(input_data[feat+"_simple"]) > feature_dict["comparable"][feat]["max"]:
feature_dict["comparable"][feat]["max"] = max(input_data[feat+"_simple"])
if feature_dict["comparable"][feat]["measurement_scale"] < max(check_scale(input_data[feat + "_complex"]), check_scale(input_data[feat + "_simple"])) < 3:
feature_dict["comparable"][feat]["measurement_scale"] = max(check_scale(input_data[feat + "_complex"]), check_scale(input_data[feat + "_simple"]))
feature_dict["comparable"][feat]["measurement_scale_text"] = scale_value_to_text(feature_dict["comparable"][feat]["measurement_scale"])
with open("feature_dict.json", "w+") as f:
json.dump(feature_dict, f, sort_keys=True, indent=4)
return feature_dict
def preprocess_input_data(input_file):
input_data = pd.read_csv("data/ALL/"+input_file, sep="\t", header=0, warn_bad_lines=True, error_bad_lines=False,
quoting=csv.QUOTE_NONE, encoding='utf-8')
comparable_col_names = get_variable_names(input_data.columns.values, comparable=True, paired=False)
input_data = add_difference_features(input_data)
paired_col_names = get_variable_names(input_data.columns.values, paired=True, comparable=False)
paired_col_names = paired_col_names + get_variable_names(input_data.columns.values, paired=False, comparable=False,
difference=True)
input_data = change_dtype(input_data, comparable_col_names, comparable=True)
input_data = change_dtype(input_data, paired_col_names, comparable=False)
return input_data, comparable_col_names, paired_col_names
def stack_corpora(results_files):
stacked_data, comparable_col_names, paired_col_names, corpus_name = "", "", "", ""
for f, input_file in enumerate(results_files):
corpus_name = input_file.split("/")[-1][:-4]
input_data, comparable_col_names, paired_col_names = preprocess_input_data(input_file)
if f == 0:
stacked_data = input_data
else:
stacked_data = pd.concat([stacked_data, input_data])
return stacked_data, comparable_col_names, paired_col_names, corpus_name
def get_statistics_for_stacked_domains(file_dict):
for key in file_dict.keys():
get_statistics_for_stacked_corpora(file_dict[key], key)
return 1
def get_paired_statistics_for_crossdata(file_dict, cross_type="domain"):
list_lang_input, corpus_names = list(), list()
for f, corpus_name in enumerate(file_dict.keys()):
print(corpus_name)
stacked_data, comparable_col_names, paired_col_names, corpus_name = stack_corpora(file_dict[corpus_name])
stacked_data, corpus_descr, corpus_effect, corpus_descr_paired = get_statistics(stacked_data, comparable_col_names,
paired_col_names, corpus_name,
"data/results/" + corpus_name + "_sent_results.txt",
"data/results/" + corpus_name + "_descr_results.txt",
"data/results/" + corpus_name + "_effect_results.txt")
list_lang_input.append(stacked_data)
corpus_names.append(corpus_name)
if f == 0:
concat_descr = corpus_descr
concat_effect = corpus_effect
concat_descr_paired = corpus_descr_paired
else:
corpus_effect = corpus_effect.drop(['feature'], axis=1)
corpus_descr = corpus_descr.drop('feature', axis=1, level=0)
corpus_descr_paired = corpus_descr_paired.drop('feature', axis=1, level=0)
concat_descr = pd.concat([concat_descr, corpus_descr], axis=1)
concat_effect = pd.concat([concat_effect, corpus_effect], axis=1)
concat_descr_paired = pd.concat([concat_descr_paired, corpus_descr_paired], axis=1)
paired_col_names = get_variable_names(list_lang_input[0].columns.values, paired=True, comparable=False)
paired_col_names = paired_col_names + get_variable_names(list_lang_input[0].columns.values, paired=False, comparable=False, difference=True)
output_descr_paired = ""
for col in paired_col_names:
# print(col, len([lang_input[col] for lang_input in list_lang_input]))
result = compare_languages([lang_input[col] for lang_input in list_lang_input], feat_name=col, list_corpus_names=corpus_names, p_threshold=0.05)
if type(result) == list:
for res in result:
output_descr_paired += col + " " + " ".join(res)+ "\n"
elif result[1] <= 0.05 and result[1] > 0.0:
output_descr_paired += col+" " + result[0] + " " + str(result[1]) + "\n"
save_results(concat_descr, concat_effect, concat_descr_paired, output_descr_paired, type_value=cross_type)
return 1
def get_statistics_for_stacked_corpora(results_files, key="stacked_corpora"):
"""for f, input_file in enumerate(results_files):
corpus_name = input_file.split("/")[-1][:-4]
input_data, comparable_col_names, paired_col_names = preprocess_input_data(input_file)
if f == 0:
stacked_data = input_data
else:
stacked_data = pd.concat([stacked_data, input_data])"""
stacked_data, comparable_col_names, paired_col_names, corpus_name = stack_corpora(results_files)
key_dir = ""
if key:
key_dir = key+"/"
if not os.path.exists("data/results/"+key):
os.makedirs("data/results/"+key)
input_data, corpus_descr, corpus_effect, corpus_descr_paired = get_statistics(stacked_data, comparable_col_names,
paired_col_names, corpus_name,
"data/results/"+key_dir+"sent_results_stacked_"+key+".txt",
"data/results/"+key_dir+"descr_results_stacked_"+key+".txt",
"data/results/"+key_dir+"effect_results_stacked_"+key+".txt")
return input_data, corpus_descr, corpus_effect, corpus_descr_paired
def get_statistics_for_all_corpora(result_files, type_value=""):
list_lang_input = list()
corpus_names = list()
type_value_dir = ""
if type_value:
type_value_dir = type_value+"/"
if not os.path.exists("data/results/"+type_value):
os.makedirs("data/results/"+type_value)
for f, input_file in enumerate(result_files):
corpus_name = input_file.split("/")[-1][:-4]
print(input_file)
input_data, comparable_col_names, paired_col_names = preprocess_input_data(input_file)
input_data, corpus_descr, corpus_effect, corpus_descr_paired = get_statistics(input_data, comparable_col_names,
paired_col_names, corpus_name,
"data/results/"+ type_value_dir + corpus_name + "_sent_results.txt",
"data/results/" + type_value_dir + corpus_name + "_descr_results.txt",
"data/results/" + type_value_dir + corpus_name + "_effect_results.txt")
list_lang_input.append(input_data)
corpus_names.append(corpus_name)
if f == 0:
concat_descr = corpus_descr
concat_effect = corpus_effect
concat_descr_paired = corpus_descr_paired
else:
corpus_effect = corpus_effect.drop(['feature'], axis=1)
corpus_descr = corpus_descr.drop('feature', axis=1, level=0)
corpus_descr_paired = corpus_descr_paired.drop('feature', axis=1, level=0)
concat_descr = pd.concat([concat_descr, corpus_descr], axis=1)
concat_effect = pd.concat([concat_effect, corpus_effect], axis=1)
concat_descr_paired = pd.concat([concat_descr_paired, corpus_descr_paired], axis=1)
paired_col_names = get_variable_names(list_lang_input[0].columns.values, paired=True, comparable=False)
paired_col_names = paired_col_names + get_variable_names(list_lang_input[0].columns.values, paired=False, comparable=False, difference=True)
output_descr_paired = ""
for col in paired_col_names:
# print(col, len([lang_input[col] for lang_input in list_lang_input]))
result = compare_languages([lang_input[col] for lang_input in list_lang_input], feat_name=col, list_corpus_names=corpus_names, p_threshold=0.05)
if type(result) == list:
for res in result:
output_descr_paired += col + " " + " ".join(res)+ "\n"
elif result[1] <= 0.05 and result[1] > 0.0:
output_descr_paired += col+" " + result[0] + " " + str(result[1]) + "\n"
save_results(concat_descr, concat_effect, concat_descr_paired, output_descr_paired, type_value=type_value)
return 1
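# Hypothetical invocation sketch (not part of the original script): the file paths
# below are placeholders, and the call assumes per-corpus result files in the format
# expected by preprocess_input_data().
def _run_all_corpora_example():
    example_files = ["data/input/corpus_a.csv", "data/input/corpus_b.csv"]  # placeholder paths
    return get_statistics_for_all_corpora(example_files, type_value="example_run")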
def logistic_regression_model(result_files, output_name, complete=False):
r2_value = 0
if complete:
        output_frame = pd.DataFrame()
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
assert "2017-09-01 00:00:01" in idx0
assert "2017-09" in idx0
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
input2 = np.array([1, 2, 3])
expected2 = np.array([1, 3])
result2 = idx2.get_value(input2, p1)
tm.assert_numpy_array_equal(result2, expected2)
def test_get_indexer(self):
# GH 17717
p1 = pd.Period("2017-09-01")
p2 = pd.Period("2017-09-04")
p3 = pd.Period("2017-09-07")
tp0 = pd.Period("2017-08-31")
        tp1 = pd.Period("2017-09-02")
# TO DO
# 1. Fair probability
# 2. Hedge opportunities
# 3. Datapane map
# 4. Change since prior poll
# Import modules
import json
import requests
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
pd.set_option('display.max_rows', None) #print all rows without truncating
pd.options.mode.chained_assignment = None #hide SettingWithCopyWarning
import numpy as np
import datetime
import os
import zipfile #Economist
import urllib.request #Economist
# Pull in market data from PredictIt's API
Predictit_URL = "https://www.predictit.org/api/marketdata/all/"
Predictit_response = requests.get(Predictit_URL)
jsondata = Predictit_response.json()
# Replace null values with zero
def dict_clean(items):
result = {}
for key, value in items:
if value is None:
value = 0
result[key] = value
return result
dict_str = json.dumps(jsondata)
jsondata = json.loads(dict_str, object_pairs_hook=dict_clean)
# Market data by contract/price in dataframe
data = []
for p in jsondata['markets']:
for k in p['contracts']:
data.append([p['id'],p['name'],k['id'],k['name'],k['bestBuyYesCost'],k['bestBuyNoCost'],k['bestSellYesCost'],k['bestSellNoCost']])
# Pandas dataframe named 'predictit_df'
predictit_df = pd.DataFrame(data)
# Update dataframe column names
predictit_df.columns=['Market_ID','Market_Name','Contract_ID','Contract_Name','PredictIt_Yes','bestBuyNoCost','BestSellYesCost','BestSellNoCost']
# Filter PredicitIt dataframe to presidential state markets/contracts
predictit_df = predictit_df[predictit_df['Market_Name'].str.contains("Which party will win") & predictit_df['Market_Name'].str.contains("2020 presidential election?")]
# Fix annoying typo (double space) in congressional district market names
predictit_df['Market_Name'] = predictit_df['Market_Name'].str.replace('in the  2020', 'in the 2020')
# Split Market_Name column into state name column
start_string = "Which party will win"
end_string = "in the 2020 presidential election?"
predictit_df['a'], predictit_df['state'] = predictit_df['Market_Name'].str.split(start_string, 1).str
predictit_df['state'], predictit_df['b'] = predictit_df['state'].str.split(end_string, 1).str
del predictit_df['a']
del predictit_df['b']
# Create answer column from contract names
predictit_df['answer'] = predictit_df['Contract_Name'].str.replace('Republican','Trump').str.replace('Democratic','Biden')
# Strip trailing/leading whitespaces in answer and state columns
predictit_df['state'] = predictit_df['state'].str.strip()
predictit_df['answer'] = predictit_df['answer'].str.strip()
# Pull in polling data from 538
pres_polling = pd.read_csv('https://projects.fivethirtyeight.com/polls-page/president_polls.csv')
pres_polling = pres_polling.dropna(subset=['state'])
# Drop extraneous columns
pres_polling = pres_polling.drop(['pollster_id', 'sponsor_ids','sponsors','display_name', 'pollster_rating_id', 'pollster_rating_name', 'fte_grade', 'sample_size', 'population', 'population_full', 'methodology', 'seat_number', 'seat_name', 'start_date', 'sponsor_candidate', 'internal', 'partisan', 'tracking', 'nationwide_batch', 'ranked_choice_reallocated', 'notes', 'url'], axis=1)
# Standardize congressional district names in 538 with PredictIt
pres_polling['state'] = pres_polling['state'].str.replace('Maine CD-1','ME-01')
pres_polling['state'] = pres_polling['state'].str.replace('Maine CD-2','ME-02')
pres_polling['state'] = pres_polling['state'].str.replace('Nebraska CD-2','NE-02')
# Filter to most recent poll for Biden & Trump
# create a count column for 'question_id' to work around "Delaware problem": multiple matchups in same survey
pres_polling = pres_polling.loc[pres_polling['pollster'] != 'SurveyMonkey'] # filter out SurveyMonkey polls
pres_polling['created_at'] = pd.to_datetime(pres_polling['created_at']) #convert 'created_at' to datetime
recent_pres_polling = pres_polling[pres_polling['answer'].isin(['Biden', 'Trump'])]
recent_pres_polling['Count'] = recent_pres_polling.groupby('question_id')['question_id'].transform('count')
recent_pres_polling = recent_pres_polling[(recent_pres_polling.Count > 1)]
recent_pres_polling = recent_pres_polling.sort_values(by=['question_id'], ascending=False).drop_duplicates(['state', 'candidate_name'], keep='first')
# Rename 538 'pct' column to '538_latest_poll'
recent_pres_polling = recent_pres_polling.rename({'pct': '538_latest_poll'}, axis=1)
# Rename 538 'end_date' column to '538_poll_date'
recent_pres_polling = recent_pres_polling.rename({'end_date': '538_poll_date'}, axis=1)
# Pull in polling data from 538 polling averages
pres_poll_avg = pd.read_csv('https://projects.fivethirtyeight.com/2020-general-data/presidential_poll_averages_2020.csv')
# Drop extraneous columns
pres_poll_avg = pres_poll_avg.drop(['cycle'], axis=1)
# Standardize congressional district names in 538 polling averages with PredictIt
pres_poll_avg['state'] = pres_poll_avg['state'].str.replace('Maine CD-1','ME-01')
pres_poll_avg['state'] = pres_poll_avg['state'].str.replace('Maine CD-2','ME-02')
pres_poll_avg['state'] = pres_poll_avg['state'].str.replace('Nebraska CD-2','NE-02')
# Standardize candidate names and column name
pres_poll_avg = pres_poll_avg.replace({'candidate_name': {'<NAME>.': 'Biden', '<NAME>': 'Trump'}})
pres_poll_avg['answer'] = pres_poll_avg['candidate_name']
# Filter to most recent poll for Biden & Trump
pres_poll_avg['modeldate'] = pd.to_datetime(pres_poll_avg['modeldate']) #convert 'modeldate' to datetime
pres_poll_avg = pres_poll_avg.sort_values(by=['modeldate']).drop_duplicates(['state', 'candidate_name'], keep='last')
pres_poll_avg = pres_poll_avg[pres_poll_avg['answer'].isin(['Biden', 'Trump'])]
# Round pct_estimate and pct_trend_adjusted to 2 decimal places
pres_poll_avg['pct_estimate'] = pres_poll_avg['pct_estimate'].round(2)
pres_poll_avg['pct_trend_adjusted'] = pres_poll_avg['pct_trend_adjusted'].round(2)
# Merge 538 poll and 538 poll averages dataframes together
recent_pres_polling = pd.merge(recent_pres_polling, pres_poll_avg, on=['state', 'answer'], how='left')
# Pull in most recent state-level model data from 538
pres_model = pd.read_csv('https://projects.fivethirtyeight.com/2020-general-data/presidential_state_toplines_2020.csv')
# Only keep latest models
pres_model = pres_model.sort_values(by=['modeldate'], ascending=False).drop_duplicates(['state', 'branch'], keep='first')
#Split into 2 dataframes for Trump and Biden
pres_model_inc = pres_model[['candidate_inc', 'state', 'winstate_inc', 'voteshare_inc', 'voteshare_inc_hi', 'voteshare_inc_lo', 'win_EC_if_win_state_inc', 'win_state_if_win_EC_inc']]
pres_model_chal = pres_model[['candidate_chal', 'state', 'winstate_chal', 'voteshare_chal', 'voteshare_chal_hi', 'voteshare_chal_lo', 'win_EC_if_win_state_chal', 'win_state_if_win_EC_chal']]
# Remove _inc and _chal from column names
pres_model_inc = pres_model_inc.rename(columns={'candidate_inc': 'answer', 'winstate_inc': 'winstate', 'voteshare_inc': 'voteshare', 'voteshare_inc_hi': 'voteshare_hi', 'voteshare_inc_lo': 'voteshare_lo', 'win_EC_if_win_state_inc': 'win_EC_if_win_state', 'win_state_if_win_EC_inc': 'win_state_if_win_EC'} )
pres_model_chal = pres_model_chal.rename(columns={'candidate_chal': 'answer', 'winstate_chal': 'winstate','voteshare_chal': 'voteshare', 'voteshare_chal_hi': 'voteshare_hi', 'voteshare_chal_lo': 'voteshare_lo', 'win_EC_if_win_state_chal': 'win_EC_if_win_state', 'win_state_if_win_EC_chal': 'win_state_if_win_EC'} )
# Concatenate Trump and Biden dataframes together
frames = [pres_model_inc, pres_model_chal]
pres_model = pd.concat(frames)
# Change 'District of Columbia' to 'DC'
pres_model['state'] = pres_model['state'].str.replace('District of Columbia','DC')
# Standardize congressional district names
pres_model['state'] = pres_model['state'].str.replace('ME-1','ME-01')
pres_model['state'] = pres_model['state'].str.replace('ME-2','ME-02')
pres_model['state'] = pres_model['state'].str.replace('NE-1','NE-01')
pres_model['state'] = pres_model['state'].str.replace('NE-2','NE-02')
pres_model['state'] = pres_model['state'].str.replace('NE-3','NE-03')
# Rename 538 'end_date' column to '538_poll_date'
pres_model = pres_model.rename({'winstate': '538_model'}, axis=1)
# Pull in most recent state-level model data from The Economist
url = 'https://cdn.economistdatateam.com/us-2020-forecast/data/president/economist_model_output.zip'
remote = urllib.request.urlopen(url) # read remote file
data = remote.read() # read from remote file
remote.close() # close urllib request
local = open('economist_model_output.zip', 'wb') # write binary to local file
local.write(data)
local.close() # close file
zf = zipfile.ZipFile('economist_model_output.zip')
econ_df = pd.read_csv(zf.open('output/site_data//state_averages_and_predictions_topline.csv'))
# Rename columns in econ_df
#econ_df = econ_df.rename({'projected_win_prob': 'dem_projected_win_prob'})
# Create Trump dataframe from Biden dataframe
econ_df_trump = econ_df.copy()
# Add answer column
econ_df['answer'] = 'Biden'
econ_df_trump['answer'] = 'Trump'
# Drop extraneous columns
econ_df = econ_df.drop(columns=['dem_average_low', 'dem_average_mean', 'dem_average_high', 'projected_vote_low', 'projected_vote_high', 'projected_vote_mean'])
econ_df_trump = econ_df_trump.drop(columns=['dem_average_low', 'dem_average_mean', 'dem_average_high', 'projected_vote_low', 'projected_vote_high', 'projected_vote_mean'])
# Calculate Trump probabilities from Biden probabilities
econ_df_trump['projected_win_prob'] = 1 - econ_df_trump['projected_win_prob']
# Concatenate dataframes
frames = [econ_df, econ_df_trump]
econ_df = pd.concat(frames)
# Standardize state names in econ_df
econ_df['state'] = econ_df['state'].map({
'AL':'Alabama',
'AK':'Alaska',
'AZ':'Arizona',
'AR':'Arkansas',
'CA':'California',
'CO':'Colorado',
'CT':'Connecticut',
'DE':'Delaware',
'DC':'DC',
'FL':'Florida',
'GA':'Georgia',
'HI':'Hawaii',
'ID':'Idaho',
'IL':'Illinois',
'IN':'Indiana',
'IA':'Iowa',
'KS':'Kansas',
'KY':'Kentucky',
'LA':'Louisiana',
'ME':'Maine',
'MD':'Maryland',
'MA':'Massachusetts',
'MI':'Michigan',
'MN':'Minnesota',
'MS':'Mississippi',
'MO':'Missouri',
'MT':'Montana',
'NE':'Nebraska',
'NV':'Nevada',
'NH':'New Hampshire',
'NJ':'New Jersey',
'NM':'New Mexico',
'NY':'New York',
'NC':'North Carolina',
'ND':'North Dakota',
'OH':'Ohio',
'OK':'Oklahoma',
'OR':'Oregon',
'PA':'Pennsylvania',
'RI':'Rhode Island',
'SC':'South Carolina',
'SD':'South Dakota',
'TN':'Tennessee',
'TX':'Texas',
'UT':'Utah',
'VT':'Vermont',
'VA':'Virginia',
'WA':'Washington',
'WV':'West Virginia',
'WI':'Wisconsin',
'WY':'Wyoming'})
# Change column names
econ_df = econ_df.rename(columns={"projected_win_prob": "Econ_model"})
econ_df = econ_df.rename(columns={"date": "Econ_date"})
# Pull in gambling odds
odds_df = pd.read_csv('https://raw.githubusercontent.com/mauricebransfield/predictit_538_odds/master/odds_state_presidential.csv', index_col=[0]) # error_bad_lines=False,
# Replace hyphen in state names with space
odds_df['state'] = odds_df['state'].str.replace('-',' ')
# Standardize Washington DC & Washington State
odds_df['state'] = odds_df['state'].str.replace('Washington Dc','DC')
odds_df['state'] = odds_df['state'].str.replace('Washington State','Washington')
# Replace party with candidate names
odds_df['answer'] = odds_df['answer'].str.replace('Republicans','Trump')
odds_df['answer'] = odds_df['answer'].str.replace('Democratic','Biden')
odds_df['answer'] = odds_df['answer'].str.replace('Democrats','Biden')
odds_df['answer'] = odds_df['answer'].str.replace('Democrat','Biden')
# Drop rows where the answer is blank (scraped as newline characters)
odds_df = odds_df[odds_df.answer != '\n\n']
# Drop columns with all nan values
odds_df = odds_df.dropna(axis=1, how='all')
# Convert odds_df column headers to list
odds_df_columns = list(odds_df.columns.values)
odds_df_columns.remove('answer')
odds_df_columns.remove('state')
odds_df_loop = odds_df.copy()
del odds_df_loop['answer']
del odds_df_loop['state']
def split_more(x):
return pd.Series( x.split('/') )
# denominator / (denominator + numerator) = implied probability
# Loop through odds columns to convert fractional odds to new column of implied probability
for i in odds_df_columns:
odds_df_loop['numerator'], odds_df_loop['denominator'] = odds_df_loop[i].str.split('/', 1).str
odds_df_loop['denominator'] = pd.to_numeric(odds_df_loop['denominator'], errors='coerce').fillna(0).astype(np.int64)
odds_df_loop['denominator'] = odds_df_loop['denominator'].mask(odds_df_loop['denominator']==0).fillna(1) # workaround
odds_df_loop['numerator'] = pd.to_numeric(odds_df_loop['numerator'], errors='coerce').fillna(0).astype(np.int64)
odds_df_loop[str(i) + '_imp_prob'] = (odds_df_loop['denominator'] / (odds_df_loop['denominator'] + odds_df_loop['numerator'])).round(2)
# Concatenate imp_prob columns with 'answer' and 'state' columns
asdf = [odds_df['answer'], odds_df['state']]
headers = ["answer", "state"]
as_df = pd.concat(asdf, axis=1, keys=headers)
odds_imp_prob_df = pd.concat([odds_df_loop, as_df], axis=1)
# Merge PredictIt and odds dataframes together
df = pd.merge(predictit_df, odds_imp_prob_df, on=['state', 'answer'], how='left')
# Merge 538 polls into new dataframe
df = pd.merge(df, recent_pres_polling, on=['state', 'answer'], how='left')
import numpy as np
import pandas as pd
import random
import plotly.express as px
from datetime import datetime
rows_to_keep = 43
sheet_data = pd.read_excel("https://docs.google.com/spreadsheets/d/1DuYUj2ODS8D3PWK42ZopUD1dqcg89ckI6vPn71LidGo/export?format=xlsx")
sheet_data = sheet_data.iloc[:rows_to_keep].drop(columns=["Overall % Lost", "Imgur"]).dropna(axis=1, how="all")
sheet_data = sheet_data.rename(columns={"$": "Pot Contribution", "Starting 1/1/22": "Starting Weight"})
sheet_data = sheet_data.melt(id_vars=["Participant", "Pot Contribution", "Paid?", "Starting Weight"], var_name="Date", value_name="Weight")
sheet_data["Starting Weight"] = pd.to_numeric(sheet_data["Starting Weight"])
from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.data import gageII, usgs, gridMET
from hydroDL.master import basins
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import time
# read NTN
dirNTN = os.path.join(kPath.dirData, 'EPA', 'NTN')
fileData = os.path.join(dirNTN, 'NTN-All-w.csv')
fileSite = os.path.join(dirNTN, 'NTNsites.csv')
tabData = pd.read_csv(fileData)
tabSite = pd.read_csv(fileSite)
tabData['siteID'] = tabData['siteID'].apply(lambda x: x.upper())
tabData = tabData.replace(-9, np.nan)
# transfer to weekly
t1 = pd.to_datetime(tabData['dateon'],
infer_datetime_format=True).dt.normalize()
t2 = pd.to_datetime(tabData['dateoff'],
infer_datetime_format=True).dt.normalize()
wd1 = t1.dt.weekday
wd2 = t2.dt.weekday
ind = np.where((wd1 == 1) & (wd2 == 1))[0]
# pick out sites that are have relative large number of observations
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)
# ===============================================================================
# Copyright 2018 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import json
import os
from copy import deepcopy
from geopandas import GeoDataFrame
from numpy import nan
from pandas import read_table, read_csv, DataFrame, Series, concat
from pandas.io.json import json_normalize
from map.tables import to_polygon
DROP = ['SOURCE_DESC', 'SECTOR_DESC', 'GROUP_DESC',
'COMMODITY_DESC', 'CLASS_DESC', 'PRODN_PRACTICE_DESC',
'UTIL_PRACTICE_DESC', 'STATISTICCAT_DESC', 'UNIT_DESC',
'SHORT_DESC', 'DOMAIN_DESC', 'DOMAINCAT_DESC', 'STATE_FIPS_CODE',
'ASD_CODE', 'ASD_DESC', 'COUNTY_ANSI',
'REGION_DESC', 'ZIP_5', 'WATERSHED_CODE',
'WATERSHED_DESC', 'CONGR_DISTRICT_CODE', 'COUNTRY_CODE',
'COUNTRY_NAME', 'LOCATION_DESC', 'YEAR', 'FREQ_DESC',
'BEGIN_CODE', 'END_CODE', 'REFERENCE_PERIOD_DESC',
'WEEK_ENDING', 'LOAD_TIME', 'VALUE', 'AGG_LEVEL_DESC',
'CV_%', 'STATE_ALPHA', 'STATE_NAME', 'COUNTY_NAME']
TSV = {1987: ('DS0041/35206-0041-Data.tsv', 'ITEM01018', 'FLAG01018'),
1992: ('DS0042/35206-0042-Data.tsv', 'ITEM010018', 'FLAG010018'),
1997: ('DS0043/35206-0043-Data.tsv', 'ITEM01019', 'FLAG01019')}
def get_old_nass(_dir, out_file):
master = None
first = True
for k, v in TSV.items():
print(v)
value = 'VALUE_{}'.format(k)
_file, item, flag = v
csv = os.path.join(_dir, _file)
df = read_table(csv)
df.columns = [str(x).upper() for x in df.columns]
df.index = df['FIPS']
try:
df.drop('FIPS', inplace=True)
except KeyError:
pass
df = df[['LEVEL', item, flag]]
df = df[df['LEVEL'] == 1]
if k != 1997:
df = df[df[flag] == 0]
df.dropna(axis=0, subset=[item], inplace=True, how='any')
if first:
first = False
master = deepcopy(df)
master[value] = df[item].astype(float)
master.drop([flag, item, 'LEVEL'], inplace=True, axis=1)
else:
master = concat([master, df], axis=1)
master[value] = df[item].astype(float)
master.drop([flag, item, 'LEVEL'], inplace=True, axis=1)
master.to_csv(out_file)
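# Hypothetical usage sketch (not part of the original module): assumes the ICPSR 35206
# TSV extracts referenced in TSV are unpacked under a local directory; both paths are
# placeholders.
def _run_get_old_nass_example():
    get_old_nass('data/ICPSR_35206', 'data/old_nass_irrigation.csv')  # placeholder paths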
def get_nass(csv, out_file, old_nass=None):
first = True
if old_nass:
old_df = read_csv(old_nass)
old_df.index = old_df['FIPS']
for c in csv:
print(c)
try:
df = read_table(c, sep='\t')
assert len(list(df.columns)) > 2
except AssertionError:
            df = read_csv(c)
'''Functions used for the primary analysis'''
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold, cross_val_predict
from scipy.stats import binom, chi2, norm
from copy import deepcopy
from multiprocessing import Pool
def threshold(probs, cutoff=.5):
'''Converts probabilities to class guesses.
Parameters
probs: the probabilities to be cut (float in [0, 1])
cutoff: the probability cut point (float in [0, 1])
Returns
class guesses as ints from {0, 1}
'''
return np.array(probs >= cutoff).astype(np.uint8)
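# Illustrative usage sketch (not part of the original module); the probability
# values below are made up.
def _demo_threshold():
    demo_probs = np.array([0.10, 0.45, 0.60, 0.90])
    return threshold(demo_probs, cutoff=0.5)  # -> array([0, 0, 1, 1], dtype=uint8)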
def mcnemar_test(targets, guesses, cc=True):
'''Runs McNemar's test for the difference in paired proportions.
Parameters
targets: the true labels (arr of {0, 1})
guesses: the predicted labels (arr of {0, 1})
cc: whether to perform a continuity correction (bool)
Returns
    'b': number of false positives (cell [0, 1] of the confusion matrix)
    'c': number of false negatives (cell [1, 0] of the confusion matrix)
'stat': chi-squared statistic
'pval': p-value from the test
'''
    cm = confusion_matrix(targets, guesses)
b = int(cm[0, 1])
c = int(cm[1, 0])
if cc:
stat = (abs(b - c) - 1)**2 / (b + c)
else:
stat = (b - c)**2 / (b + c)
p = 1 - chi2(df=1).cdf(stat)
outmat = np.array([b, c, stat, p]).reshape(-1, 1)
out = pd.DataFrame(outmat.transpose(),
columns=['b', 'c', 'stat', 'pval'])
return out
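# Illustrative usage sketch (not part of the original module); the labels are made up.
# Returns a one-row frame with the discordant counts, the chi-squared statistic,
# and the p-value.
def _demo_mcnemar_test():
    y_true = np.array([1, 1, 0, 0, 1, 0, 1, 0])
    y_pred = np.array([1, 0, 0, 1, 1, 0, 1, 1])
    return mcnemar_test(y_true, y_pred, cc=True)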
def brier_score(targets, guesses):
'''Calculates Brier score, or mean squared error.
Parameters
targets: the true labels (arr of {0, 1})
guesses: the predicted scores (float in (0, 1) or int from {0, 1})
Returns
Brier score (float in (0, 1))
'''
return np.sum((guesses - targets)**2) / targets.shape[0]
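# Illustrative usage sketch (not part of the original module); with these made-up
# inputs the score is (0.1**2 + 0.2**2 + 0.4**2 + 0.4**2) / 4 = 0.0925.
def _demo_brier_score():
    y_true = np.array([1, 0, 1, 0])
    y_prob = np.array([0.9, 0.2, 0.6, 0.4])
    return brier_score(y_true, y_prob)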
def slim_metrics(df, rules, by=None):
'''Returns number and percent positive for a set of predicted labels.
Parameters
df: a data frame holding the columns of predicted labels
rules: column names for the predicted labels
by: criteria to use for counting, e.g., for calculating sensitivity
Returns
a df with the rule, n positive, and percent positive
'''
if by is not None:
good_idx = np.where(by == 1)[0]
df = df.iloc[good_idx]
N = df.shape[0]
out = np.zeros(shape=(len(rules), 2))
for i, rule in enumerate(rules):
out[i, 0] = np.sum(df[rule])
out[i, 1] = out[i, 0] / N
out_df = pd.DataFrame(out, columns=['n', 'pct'])
out_df['rule'] = rules
out_df = out_df[['rule', 'n', 'pct']]
return out_df
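# Illustrative usage sketch (not part of the original module); 'rule_a' and 'rule_b'
# are made-up rule columns, and `by` restricts the counts to the flagged rows.
def _demo_slim_metrics():
    rules_df = pd.DataFrame({'rule_a': [1, 0, 1, 1], 'rule_b': [0, 0, 1, 0]})
    flags = np.array([1, 1, 0, 1])
    return slim_metrics(rules_df, rules=['rule_a', 'rule_b'], by=flags)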
def clf_metrics(targets,
guesses,
average_by=None,
weighted=True,
round=4,
round_pval=False,
mcnemar=False):
'''Calculates a range of binary classification metrics for a set of class
predictions relative to a reference standard.
    Keyword arguments:
targets: the true labels (arr of {0, 1})
guesses: the predicted labels (arr of {0, 1})
average_by: the variable to use for macro averaging (1-d array)
weighted: whether to weight macro averaging (bool)
round: number of significant digits to report
round_pval: whether to round p-values from McNemar's test (bool)
mcnemar: whether to run McNemar's test
Returns
a one-row data frame with the following columns:
tp: true positive count
fp: false positive count
tn: true negative count
fn: false negative count
sens: sensitivity
spec: specificity
ppv: positive predictive value
npv: negative predictive value
j: Youden's j index
mcc: Matthews correlation coefficient
brier: Brier score (or 1 - acc)
f1: F1 score
true_prev: true prevalence
pred_prev: predicted prevalence
abs_diff: absolute difference in prevalence
rel_prev_diff: percent difference in prevalence
mcnemar: p-value from McNemar's test (optional)
'''
# Converting pd.Series to np.array
stype = type(pd.Series())
if type(guesses) == stype:
guesses = guesses.values
if type(targets) == stype:
targets = targets.values
if type(average_by) == stype:
        average_by = average_by.values
# Optionally returning macro-average results
if average_by is not None:
return macro_clf_metrics(targets=targets,
guesses=guesses,
by=average_by,
weighted=weighted,
round=round)
# Constructing the 2x2 table
confmat = confusion_matrix(targets, guesses)
tp = confmat[1, 1]
fp = confmat[0, 1]
tn = confmat[0, 0]
fn = confmat[1, 0]
# Calculating basic measures of diagnostic accuracy
sens = np.round(tp / (tp + fn), round)
spec = np.round(tn / (tn + fp), round)
ppv = np.round(tp / (tp + fp), round)
npv = np.round(tn / (tn + fn), round)
f1 = np.round(2 * (sens * ppv) / (sens + ppv), round)
j = sens + spec - 1
mcc_num = ((tp * tn) - (fp * fn))
mcc_denom = np.sqrt(((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
mcc = mcc_num / mcc_denom
brier = np.round(brier_score(targets, guesses), round)
outmat = np.array([tp, fp, tn, fn,
sens, spec, ppv,
npv, j, f1, mcc, brier]).reshape(-1, 1)
out = pd.DataFrame(outmat.transpose(),
columns=['tp', 'fp', 'tn',
'fn', 'sens', 'spec',
'ppv', 'npv', 'j',
'f1', 'mcc', 'brier'])
# Calculating some additional measures based on positive calls
true_prev = int(np.sum(targets == 1))
pred_prev = int(np.sum(guesses == 1))
abs_diff = (true_prev - pred_prev) * -1
rel_diff = np.round(abs_diff / true_prev, round)
if mcnemar:
pval = mcnemar_test(targets, guesses).pval[0]
if round_pval:
pval = np.round(pval, round)
count_outmat = np.array([true_prev, pred_prev, abs_diff,
rel_diff]).reshape(-1, 1)
count_out = pd.DataFrame(count_outmat.transpose(),
columns=['true_prev', 'pred_prev',
'prev_diff', 'rel_prev_diff'])
out = pd.concat([out, count_out], axis=1)
    # Optionally adding the McNemar p-value
if mcnemar:
out['mcnemar'] = pval
return out
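# Illustrative usage sketch (not part of the original module); the labels are
# made up and produce a single-row frame of the metrics documented above.
def _demo_clf_metrics():
    y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0, 1, 0])
    y_pred = np.array([1, 0, 0, 1, 0, 1, 1, 0, 1, 0])
    return clf_metrics(y_true, y_pred, mcnemar=True)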
def macro_clf_metrics(targets,
guesses,
by,
weighted=True,
round=4,
p_method='harmonic',
mcnemar=True):
'''Performs weighted or unweighted macro-averaging of clf_metrics()
by a group variable.
Parameters
    targets: the true labels (arr of {0, 1})
    guesses: the predicted labels (arr of {0, 1})
by: an array of group IDs to use for averaging (1-d array)
weighted: whether to return a weighted average
round: number of significant digits to return
p_method: how to average p-values; may be 'harmonic' or 'fisher'
    mcnemar: whether to run McNemar's test (bool)
Returns
the df from clf_metrics() where everything has been averaged
'''
# Column groups for rounding later
count_cols = ['tp', 'fp', 'tn', 'fn']
prev_cols = ['true_prev', 'pred_prev', 'prev_diff']
# Getting the indices for each group
n = len(targets)
group_names = np.unique(by)
n_groups = len(group_names)
group_idx = [np.where(by == group)[0]
for group in group_names]
group_counts = np.array([len(idx) for idx in group_idx])
# Calculating the groupwise statistics
group_stats = [clf_metrics(targets[idx],
guesses[idx],
mcnemar=mcnemar)
for idx in group_idx]
# Casting the basic counts as proportions
for i, df in enumerate(group_stats):
df[count_cols] /= group_counts[i]
df[prev_cols] /= group_counts[i]
group_stats = pd.concat(group_stats, axis=0)
# Calculating the weights
if weighted:
w = np.array(group_counts / n)
else:
w = np.repeat(1 / n_groups, n_groups)
# Calculating the mean values
averages = np.average(group_stats, axis=0, weights=w)
avg_stats = pd.DataFrame(averages).transpose()
avg_stats.columns = group_stats.columns.values
# Converting the count metrics back to integers
avg_stats[count_cols] *= n
avg_stats[count_cols] = avg_stats[count_cols].astype(int)
avg_stats[prev_cols] *= n
avg_stats.rel_prev_diff = avg_stats.prev_diff / avg_stats.true_prev
# Rounding off the floats
float_cols = ['sens', 'spec', 'npv',
'ppv', 'j', 'f1', 'brier']
avg_stats[float_cols] = avg_stats[float_cols].round(round)
avg_stats.rel_prev_diff = avg_stats.rel_prev_diff.round(round)
# Getting the mean of the p-values with either Fisher's method
# or the harmonic mean method
if mcnemar:
avg_stats.mcnemar = average_pvals(group_stats.mcnemar,
w=w,
method=p_method)
return avg_stats
def average_pvals(p_vals,
w=None,
method='harmonic',
smooth=True,
smooth_val=1e-7):
'''Averages p-values using either the harmonic mean or Fisher's method.
Parameters
p_vals: the p-values (arr of floats in [0, 1])
w: the weights for averaging
method: either 'harmonic' (default) or 'fisher' (str)
smooth: whether to fix pvals of 0.0 (bool)
smooth_val: the amount to use for smoothing (float)
Returns
the average p-value (single float in [0, 1])
'''
if smooth:
p = p_vals + smooth_val
else:
p = deepcopy(p_vals)
if method == 'harmonic':
if w is None:
w = np.repeat(1 / len(p), len(p))
p_avg = 1 / np.sum(w / p)
elif method == 'fisher':
stat = -2 * np.sum(np.log(p))
        # Fisher's combined statistic is chi-squared with 2k degrees of freedom
        p_avg = 1 - chi2(df=2 * len(p)).cdf(stat)
return p_avg
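# Illustrative usage sketch (not part of the original module); the p-values are made up.
def _demo_average_pvals():
    pvals = np.array([0.01, 0.04, 0.20])
    harmonic = average_pvals(pvals)                  # harmonic-mean combination
    fisher = average_pvals(pvals, method='fisher')   # Fisher's combined test
    return harmonic, fisher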
def boot_sample(df,
by=None,
size=None,
seed=None,
return_df=False):
'''Returns a single bootstrap sample of rows from a data frame.
Parameters
df: the data frame holding the records (2-d array or pd.DataFrame)
by: an array of group IDs for sampling by group instead of row (arr)
size: the size of bootstrap samples to take, if not nrow(df) (int)
seed: seed to use for generating the random sample (int)
return_df: whether to return row indices (False) or the df (True)
Returns
1a. An array of bootstrap-sampled row numbers, if return_df is False; OR
    1b. A bootstrap sample of the original df, if return_df is True
'''
# Setting the random states for the samples
if seed is None:
seed = np.random.randint(1, 1e6, 1)[0]
np.random.seed(seed)
# Getting the sample size
if size is None:
size = df.shape[0]
# Sampling across groups, if group is unspecified
if by is None:
np.random.seed(seed)
idx = range(size)
boot = np.random.choice(idx,
size=size,
replace=True)
# Sampling by group, if group has been specified
else:
levels = np.unique(by)
level_idx = [np.where(by == level)[0]
for level in levels]
boot = np.random.choice(level_idx,
size=len(levels),
replace=True)
boot = np.concatenate(boot).ravel()
if not return_df:
return boot
else:
return df.iloc[boot, :]
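# Illustrative usage sketch (not part of the original module); shows the default
# row-level resampling path on a small made-up frame.
def _demo_boot_sample():
    demo_df = pd.DataFrame({'x': range(6), 'y': [0, 1, 0, 1, 0, 1]})
    row_idx = boot_sample(demo_df, seed=2021)                    # resampled row numbers
    resampled = boot_sample(demo_df, seed=2021, return_df=True)  # resampled rows
    return row_idx, resampled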
def diff_boot_cis(ref,
comp,
a=0.05,
abs_diff=False,
method='bca',
interpolation='nearest'):
    '''Calculates bootstrap confidence intervals for the difference in
performance metrics between two competing classifiers.
Parameters
    ref: the reference multi.boot_cis object
comp: the comparison multi.boot_cis object
a: significance level for the intervals (float in [0, 1])
abs_diff: whether to take the absolute value of the difference (bool)
method: interval method; options are 'diff', 'pct', and 'bca'
interpolation: interpolation method for np.quantile
Returns
A pd.DataFrame with the following columns:
ref: the reference value for the metric
comp: the comparison value for the metric
d: the (absolute) difference between the ref and the comp values
lower: the lower bound for the difference
upper: the upper bound for the difference
'''
# Quick check for a valid estimation method
methods = ['pct', 'diff', 'bca']
assert method in methods, 'Method must be pct, diff, or bca.'
    # Pulling out the original estimates
ref_stat = pd.Series(ref.cis.stat.drop('true_prev').values)
ref_scores = ref.scores.drop('true_prev', axis=1)
comp_stat = pd.Series(comp.cis.stat.drop('true_prev').values)
comp_scores = comp.scores.drop('true_prev', axis=1)
# Optionally Reversing the order of comparison
diff_scores = comp_scores - ref_scores
diff_stat = comp_stat - ref_stat
# Setting the quantiles to retrieve
lower = (a / 2) * 100
upper = 100 - lower
# Calculating the percentiles
if method == 'pct':
cis = np.nanpercentile(diff_scores,
q=(lower, upper),
interpolation=interpolation,
axis=0)
cis = pd.DataFrame(cis.transpose())
elif method == 'diff':
diffs = diff_stat.values.reshape(1, -1) - diff_scores
percents = np.nanpercentile(diffs,
q=(lower, upper),
interpolation=interpolation,
axis=0)
lower_bound = pd.Series(diff_stat + percents[0])
upper_bound = pd.Series(diff_stat + percents[1])
cis = pd.concat([lower_bound, upper_bound], axis=1)
elif method == 'bca':
# Removing true prevalence from consideration to avoid NaNs
ref_j_means = ref.jack[1].drop('true_prev')
ref_j_scores = ref.jack[0].drop('true_prev', axis=1)
comp_j_means = comp.jack[1].drop('true_prev')
comp_j_scores = comp.jack[0].drop('true_prev', axis=1)
# Calculating the bias-correction factor
n = ref.scores.shape[0]
stat_vals = diff_stat.transpose().values.ravel()
n_less = np.sum(diff_scores < stat_vals, axis=0)
p_less = n_less / n
z0 = norm.ppf(p_less)
# Fixing infs in z0
z0[np.where(np.isinf(z0))[0]] = 0.0
        # Estimating the acceleration factor
j_means = comp_j_means - ref_j_means
j_scores = comp_j_scores - ref_j_scores
diffs = j_means - j_scores
numer = np.sum(np.power(diffs, 3))
denom = 6 * np.power(np.sum(np.power(diffs, 2)), 3/2)
# Getting rid of 0s in the denominator
zeros = np.where(denom == 0)[0]
for z in zeros:
denom[z] += 1e-6
acc = numer / denom
# Calculating the bounds for the confidence intervals
zl = norm.ppf(a / 2)
zu = norm.ppf(1 - (a/2))
lterm = (z0 + zl) / (1 - acc*(z0 + zl))
uterm = (z0 + zu) / (1 - acc*(z0 + zu))
lower_q = norm.cdf(z0 + lterm) * 100
upper_q = norm.cdf(z0 + uterm) * 100
# Returning the CIs based on the adjusted quantiles
cis = [np.nanpercentile(diff_scores.iloc[:, i],
q=(lower_q[i], upper_q[i]),
interpolation=interpolation,
axis=0)
for i in range(len(lower_q))]
        cis = pd.DataFrame(cis, columns=['lower', 'upper'])
import pandas as pd
import numpy as np
import argparse
import random
def create_context_to_id_map(df, df_sent):
context_to_id = {}
c_context_id = 0
context_ids = []
relevant_sentence_ids_arr = []
df = df.reset_index()
for index, row in df.iterrows():
# add the relevant sentences to the main df
relevant_sentence_ids = df_sent.iloc[index]['ranked_matching_sentence_ids']
relevant_sentence_ids_arr.append(relevant_sentence_ids)
# map the ids
if not row['context'] in context_to_id:
context_id = c_context_id
context_to_id[row['context']] = c_context_id
c_context_id += 1
else:
context_id = context_to_id[row['context']]
context_ids.append(context_id)
print('Num context texts: ', len(context_to_id.keys()))
return context_ids, relevant_sentence_ids_arr
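# Illustrative usage sketch (not part of the original script); the two frames are
# made up but follow the column layout assumed above ('context' in df, row-aligned
# 'ranked_matching_sentence_ids' in df_sent).
def _demo_create_context_to_id_map():
    demo_df = pd.DataFrame({'context': ['ctx one', 'ctx two', 'ctx one']})
    demo_sent = pd.DataFrame({'ranked_matching_sentence_ids': [[0, 2], [1], [3]]})
    return create_context_to_id_map(demo_df, demo_sent)  # ([0, 1, 0], [[0, 2], [1], [3]])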
def train_val_split(df, frac):
train_context_ids = []
val_context_ids = []
df_train = pd.DataFrame()
    df_val = pd.DataFrame()
import asyncio
import copy
import logging
import talib as ta
from .exceptions import NotImplementedException
from sklearn.cluster import KMeans, DBSCAN, MeanShift
from sklearn.metrics import silhouette_score
import pandas as pd
import numpy as np
from itertools import groupby
from operator import itemgetter
from .utils import time_scale_to_milisecond
class Analyzer():
"""
    The duty of the Analyzer class is to provide analysis objects.
    It is configurable via the config file.
    The use case does not require multiple instances.
"""
# This initiation may not be needed
# TODO: Normally lambda functions would be quite useful to have one-liner functions,
# however they are not "awaitable". Thus each one-liner lambda expression should be an awaitable method
def __init__(self, _config):
self.logger = logging.getLogger('app.{}'.format(__name__))
self.config = _config
self.current_time_df={}
return
async def sample_analyzer(self, data_dict):
analysis_dict=dict()
for pair,data_obj in data_dict.items():
analysis_obj = dict()
for time_scale, time_df in data_obj.items():
self.current_time_df = copy.deepcopy(time_df)
# Generate coroutines
indicator_coroutines = []
header = '_ind_'
indicator_method_names = list(map(lambda orig_string: header + orig_string, self.config['analysis']['indicators'].keys()))
for ind in indicator_method_names:
if hasattr(self, ind): indicator_coroutines.append(getattr(self, ind)())
else: raise RuntimeError(f'Unknown indicator: "{ind}"')
analysis_output = list(await asyncio.gather(*indicator_coroutines))
                # NOTE: Since coroutines are not reusable, they need to be created in each cycle
                # NOTE: pd.Series needs to be cast to list
stats = dict()
for key, value in zip(self.config['analysis']['indicators'].keys(), analysis_output):
stats[key] = value
# Assign "stats" to each "time_scale"
analysis_obj[time_scale] = stats
analysis_dict[pair] = analysis_obj
return analysis_dict
async def visual_analysis(self, data_dict):
analysis_dict=dict()
for pair,data_obj in data_dict.items():
analysis_obj = dict()
for time_scale, time_df in data_obj.items():
self.current_time_df = copy.deepcopy(time_df)
# Generate coroutines
indicator_coroutines = []
header = '_ind_'
indicator_method_names = list(map(lambda orig_string: header + orig_string, self.config['visualization']['indicators'].keys()))
for ind in indicator_method_names:
if hasattr(self, ind): indicator_coroutines.append(getattr(self, ind)())
else: raise RuntimeError(f'Unknown indicator: "{ind}"')
header = '_pat_'
pattern_method_names = list(map(lambda orig_string: header + orig_string, self.config['visualization']['patterns'])) # Patterns do not take arg
for pat in pattern_method_names:
if hasattr(self, pat): indicator_coroutines.append(getattr(self, pat)())
else: raise RuntimeError(f'Unknown pattern: "{pat}"')
analysis_output = list(await asyncio.gather(*indicator_coroutines))
                # NOTE: Since coroutines are not reusable, they need to be created in each cycle
                # NOTE: pd.Series needs to be cast to list
stats = dict()
for key, value in zip(list(self.config['visualization']['indicators'].keys()) + self.config['visualization']['patterns'], analysis_output):
stats[key] = value
# Assign "stats" to each "time_scale"
analysis_obj[time_scale] = stats
analysis_dict[pair] = analysis_obj
return analysis_dict
# Analyzers
async def _ind_market_classifier(self):
# TODO: Market status receives the name of some other indicators and runs
# a secondary analysis.
# Maybe the secondary analysis such as S/R levels should be put under
# another category
analyzer = "_ind_" + self.config['visualization']['indicators']['market_classifier']
if hasattr(self, analyzer):
analysis_output = await getattr(self, analyzer)()
classification = {}
if analyzer == '_ind_aroonosc':
uptrend_filter = np.where(np.array(analysis_output) > 0)[0]
downtrend_filter = np.where(np.array(analysis_output) < 0)[0]
classification = {'downtrend':downtrend_filter, 'uptrend':uptrend_filter}
elif analyzer == '_ind_fractal_aroon':
uptrend_filter = np.where(np.nan_to_num(analysis_output['aroonup']) > 80)[0]
downtrend_filter = np.where(np.nan_to_num(analysis_output['aroondown']) > 80)[0]
classification = {'downtrend':downtrend_filter, 'uptrend':uptrend_filter}
ts_index = self.current_time_df.index
result = {}
# TODO: Make validation counter generic
validation_counter = 5
for class_name, filter_idx in classification.items():
class_item_list = []
for k, g in groupby(enumerate(filter_idx), lambda ix: ix[0] - ix[1]):
seq_idx = list(map(itemgetter(1), g))
# NOTE: If the sq. length is 1 than it will not be displayed. Apply "seq_idx[-1]+1" if you need to
#if len(seq_idx) >= validation_counter:
# class_item = {'start':ts_index[seq_idx[0]], 'end':ts_index[seq_idx[-1]], 'validation_point':ts_index[seq_idx[0]+validation_counter -1]}
# class_item_list.append(class_item)
class_item = {'start':ts_index[seq_idx[0]], 'end':ts_index[seq_idx[-1]]}
class_item_list.append(class_item)
result[class_name] = class_item_list
'''
Sample: result
{
downtrend:[
{
start_ts:
end_ts:
validation_point:
},
...
]
}
'''
        # If the last closed candle is in an uptrend, the 'end' parameter will be equal to its timestamp,
# so the day_diff will be 1
result['is_daydiff']=int((self.current_time_df.index[-1] - result['uptrend'][-1]['end'])/time_scale_to_milisecond('1d'))
result['is_lastidx']=int(analysis_output['aroonup'][-1] > 80)
return result
async def _ind_fractal_aroon(self):
fractal_line = await self._ind_fractal_line_3()
aroondown, aroonup = ta.AROON(pd.Series(fractal_line['bearish']), pd.Series(fractal_line['bullish']), timeperiod=25)
return {'aroonup':list(aroonup), 'aroondown': list(aroondown)}
async def _ind_fractal_aroonosc(self):
fractal_line = await self._ind_fractal_line_3()
        return list(ta.AROONOSC(pd.Series(fractal_line['bearish']), pd.Series(fractal_line['bullish']), timeperiod=25))
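# Usage note (assumption, not from the original module): sample_analyzer() and
# visual_analysis() are coroutines, so they must be awaited, e.g.
# asyncio.run(Analyzer(config).sample_analyzer(data_dict)), where data_dict maps
# pair -> time_scale -> candle DataFrame and config provides the 'analysis' /
# 'visualization' keys referenced above.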
from analytic_types.segment import Segment
import utils
import unittest
import numpy as np
import pandas as pd
import math
import random
RELATIVE_TOLERANCE = 1e-1
class TestUtils(unittest.TestCase):
#example test for test's workflow purposes
def test_segment_parsion(self):
self.assertTrue(True)
def test_confidence_all_normal_value(self):
segment = [1, 2, 0, 6, 8, 5, 3]
utils_result = utils.find_confidence(segment)[0]
result = 4.0
self.assertTrue(math.isclose(utils_result, result, rel_tol = RELATIVE_TOLERANCE))
def test_confidence_all_nan_value(self):
segment = [np.nan, np.nan, np.nan, np.nan]
self.assertEqual(utils.find_confidence(segment)[0], 0)
def test_confidence_with_nan_value(self):
data = [np.nan, np.nan, 0, 8]
utils_result = utils.find_confidence(data)[0]
result = 4.0
self.assertTrue(math.isclose(utils_result, result, rel_tol = RELATIVE_TOLERANCE))
def test_interval_all_normal_value(self):
data = [1, 2, 1, 2, 4, 1, 2, 4, 5, 6]
data = pd.Series(data)
center = 4
window_size = 2
result = [1, 2, 4, 1, 2]
self.assertEqual(list(utils.get_interval(data, center, window_size)), result)
def test_interval_wrong_ws(self):
data = [1, 2, 4, 1, 2, 4]
data = pd.Series(data)
center = 3
window_size = 6
result = [1, 2, 4, 1, 2, 4]
self.assertEqual(list(utils.get_interval(data, center, window_size)), result)
def test_subtract_min_without_nan(self):
segment = [1, 2, 4, 1, 2, 4]
segment = pd.Series(segment)
result = [0, 1, 3, 0, 1, 3]
utils_result = list(utils.subtract_min_without_nan(segment))
self.assertEqual(utils_result, result)
def test_subtract_min_with_nan(self):
segment = [np.nan, 2, 4, 1, 2, 4]
segment = pd.Series(segment)
result = [2, 4, 1, 2, 4]
utils_result = list(utils.subtract_min_without_nan(segment)[1:])
self.assertEqual(utils_result, result)
def test_get_convolve(self):
data = [1, 2, 3, 2, 2, 0, 2, 3, 4, 3, 2, 1, 1, 2, 3, 4, 3, 2, 0]
data = pd.Series(data)
pattern_index = [2, 8, 15]
window_size = 2
av_model = [1, 2, 3, 2, 1]
result = []
self.assertNotEqual(utils.get_convolve(pattern_index, av_model, data, window_size), result)
def test_get_convolve_with_nan(self):
data = [1, 2, 3, 2, np.nan, 0, 2, 3, 4, np.nan, 2, 1, 1, 2, 3, 4, 3, np.nan, 0]
data = pd.Series(data)
pattern_index = [2, 8, 15]
window_size = 2
av_model = [1, 2, 3, 2, 1]
result = utils.get_convolve(pattern_index, av_model, data, window_size)
for val in result:
self.assertFalse(np.isnan(val))
def test_get_convolve_empty_data(self):
data = []
pattern_index = []
window_size = 2
window_size_zero = 0
av_model = []
result = []
self.assertEqual(utils.get_convolve(pattern_index, av_model, data, window_size), result)
self.assertEqual(utils.get_convolve(pattern_index, av_model, data, window_size_zero), result)
def test_find_jump_parameters_center(self):
segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
segment = pd.Series(segment)
jump_center = [10, 11]
self.assertIn(utils.find_pattern_center(segment, 0, 'jump'), jump_center)
def test_find_jump_parameters_height(self):
segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
segment = pd.Series(segment)
jump_height = [3.5, 4]
self.assertGreaterEqual(utils.find_parameters(segment, 0, 'jump')[0], jump_height[0])
self.assertLessEqual(utils.find_parameters(segment, 0, 'jump')[0], jump_height[1])
def test_find_jump_parameters_length(self):
segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
segment = pd.Series(segment)
jump_length = 2
self.assertEqual(utils.find_parameters(segment, 0, 'jump')[1], jump_length)
def test_find_drop_parameters_center(self):
segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
segment = pd.Series(segment)
drop_center = [14, 15, 16]
self.assertIn(utils.find_pattern_center(segment, 0, 'drop'), drop_center)
def test_find_drop_parameters_height(self):
segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
segment = pd.Series(segment)
drop_height = [3.5, 4]
self.assertGreaterEqual(utils.find_parameters(segment, 0, 'drop')[0], drop_height[0])
self.assertLessEqual(utils.find_parameters(segment, 0, 'drop')[0], drop_height[1])
def test_find_drop_parameters_length(self):
segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
segment = pd.Series(segment)
drop_length = 2
self.assertEqual(utils.find_parameters(segment, 0, 'drop')[1], drop_length)
def test_get_av_model_empty_data(self):
patterns_list = []
result = []
self.assertEqual(utils.get_av_model(patterns_list), result)
def test_get_av_model_normal_data(self):
patterns_list = [[1, 1, 1], [2, 2, 2],[3,3,3]]
result = [2.0, 2.0, 2.0]
self.assertEqual(utils.get_av_model(patterns_list), result)
def test_find_jump_nan_data(self):
data = [np.nan, np.nan, np.nan, np.nan]
data = pd.Series(data)
length = 2
height = 3
length_zero = 0
height_zero = 0
result = []
self.assertEqual(utils.find_jump(data, height, length), result)
self.assertEqual(utils.find_jump(data, height_zero, length_zero), result)
def test_find_drop_nan_data(self):
data = [np.nan, np.nan, np.nan, np.nan]
data = pd.Series(data)
length = 2
height = 3
length_zero = 0
height_zero = 0
result = []
self.assertEqual(utils.find_drop(data, height, length), result)
self.assertEqual(utils.find_drop(data, height_zero, length_zero), result)
def test_get_distribution_density(self):
segment = [1, 1, 1, 3, 5, 5, 5]
segment = pd.Series(segment)
result = (3, 5, 1)
self.assertEqual(utils.get_distribution_density(segment), result)
def test_get_distribution_density_right(self):
data = [1.0, 5.0, 5.0, 4.0]
        data = pd.Series(data)
import os
import sys
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import preprocessor as p
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import wget
import dload
from pylatex import Document, Section, Subsection, Command
from pylatex.utils import italic, NoEscape
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import plot_roc_curve, plot_confusion_matrix
from datetime import date
from io import BytesIO
from io import StringIO
#from IPython import display
import base64
from wordcloud import WordCloud
import seaborn as sns
import uuid
from sklearn.metrics import auc
import requests
import io
import fileinput
# Download/create the dataset
def fetch():
print("fetching dataset!") # replace this with code to fetch the dataset
url = 'https://raw.githubusercontent.com/vijayakuruba/Data/main/gender-classifier-DFE-791531.csv'
wget.download(url)
print("Download complete!")
def clean_data(df):
tweets = []
for line in df:
# send to tweet_processor
line_cleaned = p.clean(line)
line_cleaned = line_cleaned.lower()
tweets.append(line_cleaned)
return tweets
def prepare_data(df):
#clean_tweets(df)
df_tweet = clean_data(df["text"])
df_tweet = pd.DataFrame(df_tweet)
df_text = clean_data(df["description"].fillna(""))
df_text = pd.DataFrame(df_text)
df["clean_tweet"] = df_tweet
df["clean_text"] = df_text
return df
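# A minimal usage sketch tying fetch() and prepare_data() together. This is illustrative only:
# the encoding='latin-1' argument is an assumption about the CrowdFlower CSV export, and the
# file name simply matches the URL downloaded by fetch().
def quick_demo():
    fetch()
    df = pd.read_csv('gender-classifier-DFE-791531.csv', encoding='latin-1')
    df = prepare_data(df)
    print(df[['clean_tweet', 'clean_text']].head())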
def wordcloud(X, y,vectorizer,x_transform):
    df = pd.DataFrame({'Tweets':X, 'Gender':y})
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/27 9:55 AM
# @Author : R
# @File : TMDB_Predict_Finally.py
# @Software: PyCharm
# coding: utf-8
# # Kaggle for TMDB
# In[1]:
import numpy as np
import pandas as pd
import warnings
from tqdm import tqdm
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
from collections import Counter
warnings.filterwarnings('ignore')
# get_ipython().run_line_magic('matplotlib', 'inline')
# Data description
# id: unique identifier of each movie
# belongs_to_collection: json with the collection's tmdb id, name, poster URL and backdrop URL
# budget: movie budget; a value of 0 means unknown
# genres: list of movie genres, json with id and name
# homepage: URL of the movie's official homepage
# imdb_id: unique id of the movie in the imdb database
# original_language: original language of the production, a 2-character string
# original_title: the movie's original title, which may differ from the name in belongs_to_collection
# overview: plot summary
# popularity: popularity of the movie, a float value
# poster_path: URL of the movie poster
# production_companies: json with the id and name of the production companies
# production_countries: json with the 2-character code and full name of the production countries
# release_date: release date of the movie
# runtime: movie duration
# spoken_languages: language versions of the movie, json format
# status: whether the movie has been released
# tagline: the movie's tagline
# title: English title of the movie
# keywords: movie keywords, json format
# cast: json cast list, including id, name, gender, etc.
# crew: information on the production crew, including director, writers, etc.
# revenue: total revenue, the value to be predicted
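# The json-like columns listed above (genres, cast, crew, Keywords, ...) arrive as plain text in
# the CSVs, while prepare() below indexes them as lists of dicts (e.g. x[0]['name'], d['gender']).
# A minimal parsing sketch, assuming ast.literal_eval is acceptable for these Python-literal
# strings and that missing cells should become the empty dict the x != {} checks expect:
import ast

def parse_json_columns(df, cols):
    for col in cols:
        # Strings are parsed into lists of dicts; NaN / empty cells become {} so that
        # len(x) and x != {} style checks in prepare() remain safe.
        df[col] = df[col].apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else {})
    return df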
# # EDA
# EDA has been done separately; this script focuses on feature engineering and prediction
# Two additional datasets are used:
# 1. TMDB Competition Additional Features: contributes three new features - popularity2, rating, totalVotes
# 2. TMDB Competition Additional Training Data: roughly 2000 extra training records, which do not carry all of the attributes of the original training set
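# The two helper datasets above are not loaded in this excerpt, yet prepare() expects per-movie
# rating / totalVotes (and popularity2) columns. A hedged sketch of how they could be attached;
# the file name and the imdb_id join key are assumptions, not confirmed by this script:
# extra_train = pd.read_csv('TrainAdditionalFeatures.csv')   # assumed file name
# train = pd.merge(train, extra_train[['imdb_id', 'popularity2', 'rating', 'totalVotes']],
#                  how='left', on='imdb_id')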
# In[52]:
# Feature Engineering & Prediction
# Data preprocessing function: converts non-numeric attributes into numeric ones
def prepare(df):
global json_cols
global train_dict
df[['release_month', 'release_day', 'release_year']] = df['release_date'].str.split('/', expand=True).replace(
np.nan, 0).astype(int)
df['release_year'] = df['release_year']
df.loc[(df['release_year'] <= 19) & (df['release_year'] < 100), "release_year"] += 2000
df.loc[(df['release_year'] > 19) & (df['release_year'] < 100), "release_year"] += 1900
    # Extract the day of week and quarter from the release date
releaseDate = pd.to_datetime(df['release_date'])
df['release_dayofweek'] = releaseDate.dt.dayofweek
df['release_quarter'] = releaseDate.dt.quarter
    # Fill missing rating and totalVotes values using per-year / per-language means
rating_na = df.groupby(["release_year", "original_language"])['rating'].mean().reset_index()
df[df.rating.isna()]['rating'] = df.merge(rating_na, how='left', on=["release_year", "original_language"])
vote_count_na = df.groupby(["release_year", "original_language"])['totalVotes'].mean().reset_index()
df[df.totalVotes.isna()]['totalVotes'] = df.merge(vote_count_na, how='left',
on=["release_year", "original_language"])
# df['rating'] = df['rating'].fillna(1.5)
# df['totalVotes'] = df['totalVotes'].fillna(6)
    # Build a new feature: weightedRating
df['weightedRating'] = (df['rating'] * df['totalVotes'] + 6.367 * 1000) / (df['totalVotes'] + 1000)
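    # Illustrative arithmetic for the weighted rating above: with rating = 7.0 and totalVotes = 100,
    # weightedRating = (7.0*100 + 6.367*1000) / (100 + 1000) = 7067/1100 ~= 6.42, so sparsely voted
    # titles are pulled towards the 6.367 prior while heavily voted titles keep their own rating.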
    # Ticket prices differ between eras, so apply a simple "inflation" correction to the budget at 1.8% per year
df['originalBudget'] = df['budget']
df['inflationBudget'] = df['budget'] + df['budget'] * 1.8 / 100 * (
2018 - df['release_year']) # Inflation simple formula
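    # Illustrative arithmetic for the inflation correction above: a film released in 2000 with a
    # budget of 10,000,000 becomes 10,000,000 + 10,000,000 * 1.8/100 * (2018 - 2000) = 13,240,000
    # before the log1p transform is applied on the next line.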
df['budget'] = np.log1p(df['budget'])
    # Count the gender make-up of the crew and cast members
df['genders_0_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_crew'] = df['crew'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
df['genders_0_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 0]))
df['genders_1_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 1]))
df['genders_2_cast'] = df['cast'].apply(lambda x: sum([1 for i in x if i['gender'] == 2]))
    # Statistics derived from belongs_to_collection, Keywords and cast
df['_collection_name'] = df['belongs_to_collection'].apply(lambda x: x[0]['name'] if x != {} else 0)
le = LabelEncoder()
le.fit(list(df['_collection_name'].fillna('')))
df['_collection_name'] = le.transform(df['_collection_name'].fillna('').astype(str))
df['_num_Keywords'] = df['Keywords'].apply(lambda x: len(x) if x != {} else 0)
df['_num_cast'] = df['cast'].apply(lambda x: len(x) if x != {} else 0)
df['_num_crew'] = df['crew'].apply(lambda x: len(x) if x != {} else 0)
df['_popularity_mean_year'] = df['popularity'] / df.groupby("release_year")["popularity"].transform('mean')
df['_budget_runtime_ratio'] = df['budget'] / df['runtime']
df['_budget_popularity_ratio'] = df['budget'] / df['popularity']
df['_budget_year_ratio'] = df['budget'] / (df['release_year'] * df['release_year'])
df['_budget_year_ratio'] = df['budget'] / (df['release_year'] * df['release_year'])
df['_releaseYear_popularity_ratio'] = df['release_year'] / df['popularity']
df['_releaseYear_popularity_ratio2'] = df['popularity'] / df['release_year']
df['_popularity_totalVotes_ratio'] = df['totalVotes'] / df['popularity']
df['_rating_popularity_ratio'] = df['rating'] / df['popularity']
df['_rating_totalVotes_ratio'] = df['totalVotes'] / df['rating']
df['_totalVotes_releaseYear_ratio'] = df['totalVotes'] / df['release_year']
df['_budget_rating_ratio'] = df['budget'] / df['rating']
df['_runtime_rating_ratio'] = df['runtime'] / df['rating']
df['_budget_totalVotes_ratio'] = df['budget'] / df['totalVotes']
    # Flag whether the movie has a homepage
df['has_homepage'] = 1
df.loc[pd.isnull(df['homepage']), "has_homepage"] = 0
    # Flag whether belongs_to_collection is missing
df['isbelongs_to_collectionNA'] = 0
df.loc[pd.isnull(df['belongs_to_collection']), "isbelongs_to_collectionNA"] = 1
    # Flag whether the tagline is missing
df['isTaglineNA'] = 0
df.loc[df['tagline'] == 0, "isTaglineNA"] = 1
    # Flag whether the original language is English
df['isOriginalLanguageEng'] = 0
df.loc[df['original_language'] == "en", "isOriginalLanguageEng"] = 1
    # Flag whether the title differs from the original title
df['isTitleDifferent'] = 1
df.loc[df['original_title'] == df['title'], "isTitleDifferent"] = 0
    # Flag whether the movie has been released
df['isMovieReleased'] = 1
df.loc[df['status'] != "Released", "isMovieReleased"] = 0
    # Flag whether the movie has an overview
df['isOverviewNA'] = 0
df.loc[pd.isnull(df['overview']), 'isOverviewNA'] = 1
    # Extract the collection id
df['collection_id'] = df['belongs_to_collection'].apply(lambda x: np.nan if len(x) == 0 else x[0]['id'])
    # Character and word counts for original_title
df['original_title_letter_count'] = df['original_title'].str.len()
df['original_title_word_count'] = df['original_title'].str.split().str.len()
    # Word counts / lengths for title, overview and tagline
df['title_word_count'] = df['title'].str.split().str.len()
df['overview_word_count'] = df['overview'].str.split().str.len()
df['tagline_word_count'] = df['tagline'].str.split().str.len()
df['len_title'] = df['title'].fillna('').apply(lambda x: len(str(x)))
    # Counts for production_companies, production_countries, cast, crew and spoken_languages
df['production_countries_count'] = df['production_countries'].apply(lambda x: len(x))
df['production_companies_count'] = df['production_companies'].apply(lambda x: len(x))
df['cast_count'] = df['cast'].apply(lambda x: len(x))
df['crew_count'] = df['crew'].apply(lambda x: len(x))
df['spoken_languages_count'] = df['spoken_languages'].apply(lambda x: len(x))
df['genres_count'] = df['genres'].apply(lambda x: len(x))
    # Per-year (and per-rating) aggregate statistics used as additional features
df['meanruntimeByYear'] = df.groupby("release_year")["runtime"].aggregate('mean')
df['meanPopularityByYear'] = df.groupby("release_year")["popularity"].aggregate('mean')
df['meanBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('mean')
df['meantotalVotesByYear'] = df.groupby("release_year")["totalVotes"].aggregate('mean')
df['meanTotalVotesByRating'] = df.groupby("rating")["totalVotes"].aggregate('mean')
df['medianBudgetByYear'] = df.groupby("release_year")["budget"].aggregate('median')
####################################################################################
df['_popularity_theatrical_ratio'] = df['theatrical'] / df['popularity']
df['_budget_theatrical_ratio'] = df['budget'] / df['theatrical']
# runtime
df['runtime_cat_min_60'] = df['runtime'].apply(lambda x: 1 if (x <= 60) else 0)
df['runtime_cat_61_80'] = df['runtime'].apply(lambda x: 1 if (x > 60) & (x <= 80) else 0)
df['runtime_cat_81_100'] = df['runtime'].apply(lambda x: 1 if (x > 80) & (x <= 100) else 0)
df['runtime_cat_101_120'] = df['runtime'].apply(lambda x: 1 if (x > 100) & (x <= 120) else 0)
df['runtime_cat_121_140'] = df['runtime'].apply(lambda x: 1 if (x > 120) & (x <= 140) else 0)
df['runtime_cat_141_170'] = df['runtime'].apply(lambda x: 1 if (x > 140) & (x <= 170) else 0)
df['runtime_cat_171_max'] = df['runtime'].apply(lambda x: 1 if (x >= 170) else 0)
lang = df['original_language']
df_more_17_samples = [x[0] for x in Counter(pd.DataFrame(lang).stack()).most_common(17)]
for col in df_more_17_samples:
df[col] = df['original_language'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 12):
df['month' + str(col)] = df['release_month'].apply(lambda x: 1 if x == col else 0)
    # feature engineering: Release date per quarter one hot encoding
for col in range(1, 4):
df['quarter' + str(col)] = df['release_quarter'].apply(lambda x: 1 if x == col else 0)
for col in range(1, 7):
df['dayofweek' + str(col)] = df['release_dayofweek'].apply(lambda x: 1 if x == col else 0)
    # Newly added attributes
df['is_release_day_of_1'] = 0
df.loc[df['release_day'] == 1, 'is_release_day_of_1'] = 1
df['is_release_day_of_15'] = 0
df.loc[df['release_day'] == 15, 'is_release_day_of_15'] = 1
    # More newly added attributes
# df['popularity2'] = np.log1p(df['popularity2'])
# df['popularity'] = np.log1p(df['popularity'])
# for col in range(1, 32):
# df['release_day' + str(col)] = df['release_day'].apply(lambda x: 1 if x == col else 0)
df['is_release_day_of_31'] = 0
df.loc[df['release_day'] == 31, 'is_release_day_of_15'] = 1
# popularity
df['popularity_cat_25'] = df['popularity'].apply(lambda x: 1 if (x <= 25) else 0)
df['popularity_cat_26_50'] = df['popularity'].apply(lambda x: 1 if (x > 25) & (x <= 50) else 0)
df['popularity_cat_51_100'] = df['popularity'].apply(lambda x: 1 if (x > 50) & (x <= 100) else 0)
df['popularity_cat_101_150'] = df['popularity'].apply(lambda x: 1 if (x > 100) & (x <= 150) else 0)
df['popularity_cat_151_200'] = df['popularity'].apply(lambda x: 1 if (x > 150) & (x <= 200) else 0)
df['popularity_cat_201_max'] = df['popularity'].apply(lambda x: 1 if (x >= 200) else 0)
df['_runtime_totalVotes_ratio'] = df['runtime'] / df['totalVotes']
df['_runtime_popularity_ratio'] = df['runtime'] / df['popularity']
#
df['_rating_theatrical_ratio'] = df['theatrical'] / df['rating']
df['_totalVotes_theatrical_ratio'] = df['theatrical'] / df['totalVotes']
df['_budget_mean_year'] = df['budget'] / df.groupby("release_year")["budget"].transform('mean')
df['_runtime_mean_year'] = df['runtime'] / df.groupby("release_year")["runtime"].transform('mean')
df['_rating_mean_year'] = df['rating'] / df.groupby("release_year")["rating"].transform('mean')
df['_totalVotes_mean_year'] = df['totalVotes'] / df.groupby("release_year")["totalVotes"].transform('mean')
###############################################################
    # For json attributes that hold multiple values, apply a one-hot style encoding
for col in ['genres', 'production_countries', 'spoken_languages', 'production_companies','Keywords']:
df[col] = df[col].map(lambda x: sorted(
list(set([n if n in train_dict[col] else col + '_etc' for n in [d['name'] for d in x]])))).map(
lambda x: ','.join(map(str, x)))
temp = df[col].str.get_dummies(sep=',')
df = pd.concat([df, temp], axis=1, sort=False)
    # Drop non-numeric attributes and attributes from which no useful information has been extracted yet
df.drop(['genres_etc'], axis=1, inplace=True)
df = df.drop(['id', 'revenue', 'belongs_to_collection', 'genres', 'homepage', 'imdb_id', 'overview', 'runtime'
, 'poster_path', 'production_companies', 'production_countries', 'release_date', 'spoken_languages'
, 'status', 'title', 'Keywords', 'cast', 'crew', 'original_language', 'original_title', 'tagline',
'collection_id'
], axis=1)
    # Fill missing values
df.fillna(value=0.0, inplace=True)
return df
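# prepare() reads two module-level globals, json_cols and train_dict, that are not defined in
# this excerpt. A hedged sketch of how they are typically built: per json column, keep the
# names that occur at least min_count times (the threshold of 10 is an assumption); anything
# rarer falls into the '<col>_etc' bucket handled inside prepare().
json_cols = ['genres', 'production_countries', 'spoken_languages', 'production_companies', 'Keywords']

def build_train_dict(df, cols, min_count=10):
    train_dict = {}
    for col in cols:
        counts = Counter(name for row in df[col] for name in [d['name'] for d in row])
        train_dict[col] = [name for name, cnt in counts.items() if cnt >= min_count]
    return train_dict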
# Manually fix some records in train
# The fixes cover budget and revenue
# Records where budget is far smaller than revenue are identified and corrected
# Principle: fill in the real figures where they can be looked up, otherwise fall back to the mean of movies from the same year or of the same type
train = pd.read_csv('train.csv')
train.loc[train['id'] == 16, 'revenue'] = 192864 # Skinning
train.loc[train['id'] == 90, 'budget'] = 30000000 # Sommersby
train.loc[train['id'] == 118, 'budget'] = 60000000 # Wild Hogs
train.loc[train['id'] == 149, 'budget'] = 18000000 # Beethoven
train.loc[train['id'] == 313, 'revenue'] = 12000000 # The Cookout
train.loc[train['id'] == 451, 'revenue'] = 12000000 # Chasing Liberty
train.loc[train['id'] == 464, 'budget'] = 20000000 # Parenthood
train.loc[train['id'] == 470, 'budget'] = 13000000 # The Karate Kid, Part II
train.loc[train['id'] == 513, 'budget'] = 930000 # From Prada to Nada
train.loc[train['id'] == 797, 'budget'] = 8000000 # Welcome to Dongmakgol
train.loc[train['id'] == 819, 'budget'] = 90000000 # Alvin and the Chipmunks: The Road Chip
train.loc[train['id'] == 850, 'budget'] = 90000000 # Modern Times
train.loc[train['id'] == 1007, 'budget'] = 2 # Zyzzyx Road
train.loc[train['id'] == 1112, 'budget'] = 7500000 # An Officer and a Gentleman
train.loc[train['id'] == 1131, 'budget'] = 4300000 # Smokey and the Bandit
train.loc[train['id'] == 1359, 'budget'] = 10000000 # Stir Crazy
train.loc[train['id'] == 1542, 'budget'] = 1 # All at Once
train.loc[train['id'] == 1570, 'budget'] = 15800000 # Crocodile Dundee II
train.loc[train['id'] == 1571, 'budget'] = 4000000 # Lady and the Tramp
train.loc[train['id'] == 1714, 'budget'] = 46000000 # The Recruit
train.loc[train['id'] == 1721, 'budget'] = 17500000 # Cocoon
train.loc[train['id'] == 1865, 'revenue'] = 25000000 # Scooby-Doo 2: Monsters Unleashed
train.loc[train['id'] == 1885, 'budget'] = 12 # In the Cut
train.loc[train['id'] == 2091, 'budget'] = 10 # Deadfall
train.loc[train['id'] == 2268, 'budget'] = 17500000 # Madea Goes to Jail budget
train.loc[train['id'] == 2491, 'budget'] = 6 # Never Talk to Strangers
train.loc[train['id'] == 2602, 'budget'] = 31000000 # Mr. Holland's Opus
train.loc[train['id'] == 2612, 'budget'] = 15000000 # Field of Dreams
train.loc[train['id'] == 2696, 'budget'] = 10000000 # Nurse 3-D
train.loc[train['id'] == 2801, 'budget'] = 10000000 # Fracture
train.loc[train['id'] == 335, 'budget'] = 2
train.loc[train['id'] == 348, 'budget'] = 12
train.loc[train['id'] == 470, 'budget'] = 13000000
train.loc[train['id'] == 513, 'budget'] = 1100000
train.loc[train['id'] == 640, 'budget'] = 6
train.loc[train['id'] == 696, 'budget'] = 1
train.loc[train['id'] == 797, 'budget'] = 8000000
train.loc[train['id'] == 850, 'budget'] = 1500000
train.loc[train['id'] == 1199, 'budget'] = 5
train.loc[train['id'] == 1282, 'budget'] = 9 # Death at a Funeral
train.loc[train['id'] == 1347, 'budget'] = 1
train.loc[train['id'] == 1755, 'budget'] = 2
train.loc[train['id'] == 1801, 'budget'] = 5
train.loc[train['id'] == 1918, 'budget'] = 592
train.loc[train['id'] == 2033, 'budget'] = 4
train.loc[train['id'] == 2118, 'budget'] = 344
train.loc[train['id'] == 2252, 'budget'] = 130
train.loc[train['id'] == 2256, 'budget'] = 1
train.loc[train['id'] == 2696, 'budget'] = 10000000
# Fix anomalous values in test
test = pd.read_csv('test.csv')
# Clean Data
test.loc[test['id'] == 6733, 'budget'] = 5000000
test.loc[test['id'] == 3889, 'budget'] = 15000000
test.loc[test['id'] == 6683, 'budget'] = 50000000
test.loc[test['id'] == 5704, 'budget'] = 4300000
test.loc[test['id'] == 6109, 'budget'] = 281756
test.loc[test['id'] == 7242, 'budget'] = 10000000
test.loc[test['id'] == 7021, 'budget'] = 17540562 # Two Is a Family
test.loc[test['id'] == 5591, 'budget'] = 4000000 # The Orphanage
test.loc[test['id'] == 4282, 'budget'] = 20000000 # Big Top Pee-wee
test.loc[test['id'] == 3033, 'budget'] = 250
test.loc[test['id'] == 3051, 'budget'] = 50
test.loc[test['id'] == 3084, 'budget'] = 337
test.loc[test['id'] == 3224, 'budget'] = 4
test.loc[test['id'] == 3594, 'budget'] = 25
test.loc[test['id'] == 3619, 'budget'] = 500
test.loc[test['id'] == 3831, 'budget'] = 3
test.loc[test['id'] == 3935, 'budget'] = 500
test.loc[test['id'] == 4049, 'budget'] = 995946
test.loc[test['id'] == 4424, 'budget'] = 3
test.loc[test['id'] == 4460, 'budget'] = 8
test.loc[test['id'] == 4555, 'budget'] = 1200000
test.loc[test['id'] == 4624, 'budget'] = 30
test.loc[test['id'] == 4645, 'budget'] = 500
test.loc[test['id'] == 4709, 'budget'] = 450
test.loc[test['id'] == 4839, 'budget'] = 7
test.loc[test['id'] == 3125, 'budget'] = 25
test.loc[test['id'] == 3142, 'budget'] = 1
test.loc[test['id'] == 3201, 'budget'] = 450
test.loc[test['id'] == 3222, 'budget'] = 6
test.loc[test['id'] == 3545, 'budget'] = 38
test.loc[test['id'] == 3670, 'budget'] = 18
test.loc[test['id'] == 3792, 'budget'] = 19
test.loc[test['id'] == 3881, 'budget'] = 7
test.loc[test['id'] == 3969, 'budget'] = 400
test.loc[test['id'] == 4196, 'budget'] = 6
test.loc[test['id'] == 4221, 'budget'] = 11
test.loc[test['id'] == 4222, 'budget'] = 500
test.loc[test['id'] == 4285, 'budget'] = 11
test.loc[test['id'] == 4319, 'budget'] = 1
test.loc[test['id'] == 4639, 'budget'] = 10
test.loc[test['id'] == 4719, 'budget'] = 45
test.loc[test['id'] == 4822, 'budget'] = 22
test.loc[test['id'] == 4829, 'budget'] = 20
test.loc[test['id'] == 4969, 'budget'] = 20
test.loc[test['id'] == 5021, 'budget'] = 40
test.loc[test['id'] == 5035, 'budget'] = 1
test.loc[test['id'] == 5063, 'budget'] = 14
test.loc[test['id'] == 5119, 'budget'] = 2
test.loc[test['id'] == 5214, 'budget'] = 30
test.loc[test['id'] == 5221, 'budget'] = 50
test.loc[test['id'] == 4903, 'budget'] = 15
test.loc[test['id'] == 4983, 'budget'] = 3
test.loc[test['id'] == 5102, 'budget'] = 28
test.loc[test['id'] == 5217, 'budget'] = 75
test.loc[test['id'] == 5224, 'budget'] = 3
test.loc[test['id'] == 5469, 'budget'] = 20
test.loc[test['id'] == 5840, 'budget'] = 1
test.loc[test['id'] == 5960, 'budget'] = 30
test.loc[test['id'] == 6506, 'budget'] = 11
test.loc[test['id'] == 6553, 'budget'] = 280
test.loc[test['id'] == 6561, 'budget'] = 7
test.loc[test['id'] == 6582, 'budget'] = 218
test.loc[test['id'] == 6638, 'budget'] = 5
test.loc[test['id'] == 6749, 'budget'] = 8
test.loc[test['id'] == 6759, 'budget'] = 50
test.loc[test['id'] == 6856, 'budget'] = 10
test.loc[test['id'] == 6858, 'budget'] = 100
test.loc[test['id'] == 6876, 'budget'] = 250
test.loc[test['id'] == 6972, 'budget'] = 1
test.loc[test['id'] == 7079, 'budget'] = 8000000
test.loc[test['id'] == 7150, 'budget'] = 118
test.loc[test['id'] == 6506, 'budget'] = 118
test.loc[test['id'] == 7225, 'budget'] = 6
test.loc[test['id'] == 7231, 'budget'] = 85
test.loc[test['id'] == 5222, 'budget'] = 5
test.loc[test['id'] == 5322, 'budget'] = 90
test.loc[test['id'] == 5350, 'budget'] = 70
test.loc[test['id'] == 5378, 'budget'] = 10
test.loc[test['id'] == 5545, 'budget'] = 80
test.loc[test['id'] == 5810, 'budget'] = 8
test.loc[test['id'] == 5926, 'budget'] = 300
test.loc[test['id'] == 5927, 'budget'] = 4
test.loc[test['id'] == 5986, 'budget'] = 1
test.loc[test['id'] == 6053, 'budget'] = 20
test.loc[test['id'] == 6104, 'budget'] = 1
test.loc[test['id'] == 6130, 'budget'] = 30
test.loc[test['id'] == 6301, 'budget'] = 150
test.loc[test['id'] == 6276, 'budget'] = 100
test.loc[test['id'] == 6473, 'budget'] = 100
test.loc[test['id'] == 6842, 'budget'] = 30
release_dates = pd.read_csv('release_dates_per_country.csv')
release_dates['id'] = range(1,7399)
release_dates.drop(['original_title','title'],axis = 1,inplace = True)
release_dates.index = release_dates['id']
train = pd.merge(train, release_dates, how='left', on=['id'])
test = pd.merge(test, release_dates, how='left', on=['id'])
# -*- coding: utf-8 -*-
"""
Poop analysis
Created 2020
@author: PClough
"""
import pandas as pd
import numpy as np
import chart_studio
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.subplots import make_subplots
from scipy import stats
import datetime as dt
from time import strptime
import calendar
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import vlc
df = pd.read_excel("Poo Data.xlsx", engine='openpyxl')
chart_studio.tools.set_credentials_file(username='YOUR USERNAME HERE', api_key='YOUR API HERE')
#%% Histogram of size of poos
# Replace sizes of 1, 2, and 3 in "size of poo?" heading to be small, medium and large
df['Size of poo? '].replace([1, 2, 3], ['Small', 'Medium', 'Poonarmi'], inplace = True)
fig = go.Figure()
fig.add_trace(go.Histogram(x = df['Size of poo? '],
name = 'Poop',
xbins = dict(
start = "Small",
),
marker_color = ('rgb(166,86,50)')))
fig.update_layout(
title_text = "Size of the poo poo's",
yaxis_title = "Count",
font = dict(size = 16))
plot(fig)
#%% Violin plot for day of week on x axis and type of poo on y axis
fig2 = go.Figure()
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
for day in days:
fig2.add_trace(go.Violin(x = Date_column[Date_column == day],
                             y = df['Type of poop 💩? '][Date_column == day],
name = day,
box_visible = True,
meanline_visible = True,
showlegend = False,
fillcolor = 'chocolate',
line = dict(color = 'DarkSalmon')))
fig2.update_layout(yaxis = dict(range=[0.5,7.5]), title = "Average poo type over whole year", font = dict(size = 16))
fig2.update_yaxes(ticks="inside", tick0 = 1, dtick = 1, title = "Bristol stool scale index")
plot(fig2)
# %% Ridgeline plot for day of week on x axis and type of poo on y axis
# 12 rows of data, one for each month
# 7 columns of data, averaging that months poo types
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
New_Date_column = df['When did the poo occur? '].dt.strftime("%b")
i = 0
max_val = 0
data = np.zeros([12,100]) # the value of 100 is just massively oversizing it, assuming there will be less than 100 poo's of a single type in one month
for month in months:
for j in range(1,8):
        data[i, np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j))] = j-1
        if max_val < np.sum(df['Type of poop 💩? '][New_Date_column == month] == str(j)):
max_val = np.sum(df['Type of poop ๐ฉ? '][New_Date_column == month] == str(j))
i += 1
# Find where the furthest right hand datapoint is and then cut everything off after that
idx = np.arange(max_val+1, 100)
data = np.delete(data, idx, axis=1)
data[data == 0] = 'nan'
fig3 = go.Figure()
for data_line in data:
fig3.add_trace(go.Violin(x=data_line))
fig3.update_traces(orientation='h', side='positive', width=2, points=False)
fig3.update_layout(xaxis_showgrid=False,
xaxis_zeroline=False,
xaxis=dict(range=[0,8]),
title = "Average poo type over whole year",
font = dict(size = 16))
plot(fig3)
#%% Violin plot for day of week on x axis and type of poo on y axis broken out month by month
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
fig4 = make_subplots(rows=2, cols=6, shared_yaxes=True, subplot_titles=(months))
# Remove 'Type ' before the number
df['Type of poop 💩? '] = df['Type of poop 💩? '].str.replace('Type ', '')
Date_column = df['When did the poo occur? '].dt.strftime("%a")
row_num = 1
col_num = 0
for month in months:
col_num += 1
if col_num > 6:
col_num = 1
row_num = 2
for day in days:
fig4.add_trace(go.Violin(x = Date_column[Date_column == day][New_Date_column == month],
                                 y = df['Type of poop 💩? '][Date_column == day][New_Date_column == month],
name = month + day,
box_visible = True,
meanline_visible = True,
showlegend = False,
fillcolor = 'chocolate',
line = dict(color = 'DarkSalmon')),
row = row_num, col = col_num)
fig4.update_layout(yaxis = dict(range=[0.5,7.5]), title = "Average poo type, broken down month-by-month", font = dict(size = 16))
fig4.update_yaxes(ticks="inside", col = 1, tick0 = 1, dtick = 1, title = "Bristol stool scale index")
fig4.update_xaxes(ticks="inside")
plot(fig4)
#%% scatter plot x axis = Time since last poo (delta t), y axis (Size of poo)
# Return the number of hours from a timedelta
def days_hours_minutes(td):
return td.days*24 + td.seconds//3600 + (td.seconds//60)%60/60
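# Quick illustrative check of the helper above: one day plus six hours should come back as 30 hours.
assert days_hours_minutes(pd.Timedelta(days=1, hours=6)) == 30.0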
d = {'When did the poo occur?': df['When did the poo occur? '], 'Size of poo?': df['Size of poo? '], 'time_since_last_poo': pd.Timedelta(0, unit='h')}
scatterplot_df = pd.DataFrame(data=d)
scatterplot_df = scatterplot_df.sort_values(by = ['When did the poo occur?']).reset_index(drop=True)
for i in range(1, len(df['When did the poo occur? '])-1):
scatterplot_df.loc[i, 'time_since_last_poo'] = days_hours_minutes(scatterplot_df['When did the poo occur?'][i] - scatterplot_df['When did the poo occur?'][i-1])
scatterplot_df.loc[0, 'time_since_last_poo'] = 0
scatterplot_df.loc[scatterplot_df['time_since_last_poo'].last_valid_index(), 'time_since_last_poo'] = 0
# Correlation line
dataforfitline = np.zeros([np.size(scatterplot_df,0), 1])
j = 0
for i in scatterplot_df['Size of poo?']:
if i == 'Small':
dataforfitline[j] = 1
if i == 'Medium':
dataforfitline[j] = 2
if i == 'Poonarmi':
dataforfitline[j] = 3
j += 1
dataforfitline2 = pd.DataFrame(data = scatterplot_df['time_since_last_poo'])
dataforfitline2[1] = dataforfitline
dataforfitline2 = dataforfitline2.sort_values(by = ['time_since_last_poo']).reset_index(drop=True)
slope, intercept, r_value, p_value, std_err = stats.linregress(dataforfitline2.astype(float))
line = slope*scatterplot_df['time_since_last_poo'] + intercept
fig5 = go.Figure(data=go.Scatter(x = scatterplot_df['time_since_last_poo'],
# y = scatterplot_df['Size of poo?'],
y = dataforfitline2[1],
mode = 'markers',
text = scatterplot_df['When did the poo occur?'],
name = 'Poops',
hovertemplate = "%{text}"))
fig5.add_trace(go.Scatter(x = scatterplot_df['time_since_last_poo'], y = line, mode = 'lines', name = 'R\u00b2 = ' + round(r_value**2,2).astype(str)))
fig5.update_xaxes(title_text="Hours since last poop")
fig5.update_yaxes(title_text="Size of poop")
fig5.update_layout(title = "Correlation between time since last poo and size of poo", font = dict(size = 16))
plot(fig5)
#%% scatter plot x axis = Time since las poo (delta t), y axis (Type of poo)
d2 = {'When did the poo occur?': df['When did the poo occur? '], 'Type of poo?': df['Type of poop 💩? '], 'time_since_last_poo': pd.Timedelta(0, unit='h')}
import pandas as pd
from pandas.testing import assert_frame_equal
from evaluate.report import (
PrecisionReport,
RecallReport,
Report,
DelimNotFoundError,
ReturnTypeDoesNotMatchError
)
from evaluate.classification import AlignmentAssessment
import pytest
from io import StringIO
import math
from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row
from unittest.mock import patch
class TestReport:
def test___get_report_satisfying_confidence_threshold(self):
report = Report([
pd.read_csv(StringIO(
"""id,GT_CONF
0,2
1,1
2,3
""")),
pd.read_csv(StringIO(
"""id,GT_CONF
4,3
5,1
6,2
"""))
])
actual_report = report.get_report_satisfying_confidence_threshold(2)
expected_report = Report([
pd.read_csv(StringIO(
"""id,GT_CONF
0,2
2,3
4,3
6,2
"""))])
assert actual_report==expected_report
def test___get_value_from_header_fast___field_is_in_header(self):
actual_value = Report.get_value_from_header_fast("FIELD_1=10;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_in_header_between_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_first_before_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("FIELD_1=10;DUMMY_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_last_after_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;DUMMY_2=99;FIELD_1=10;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_not_in_header(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;", "FIELD_2", int, -1, delim=";")
expected_value = -1
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_in_header___return_type_does_not_match(self):
with pytest.raises(ReturnTypeDoesNotMatchError):
Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
def test___get_value_from_header_fast___field_is_in_header___delim_is_not(self):
with pytest.raises(DelimNotFoundError):
Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim="~")
def test____create_field_from_header(self):
report = Report([
pd.read_csv(StringIO(
"""id,header
1,SEQ=ACGT;LEN=4;
2,SEQ=TG;LEN=2;
3,dummy
"""))])
report._create_field_from_header("SEQ", "header", str, "A")
report._create_field_from_header("LEN", "header", int, 1)
expected_report = Report([
pd.read_csv(StringIO(
"""id,header,SEQ,LEN
1,SEQ=ACGT;LEN=4;,ACGT,4
2,SEQ=TG;LEN=2;,TG,2
3,dummy,A,1
"""))])
assert report==expected_report
def test____create_good_eval_column(self):
report = Report([
pd.read_csv(StringIO(
"""classification
primary_correct
whatever
secondary_correct
dummy
supplementary_correct
woot
"""))])
report._create_good_eval_column()
expected_report = Report([
pd.read_csv(StringIO(
"""classification,good_eval
primary_correct,True
whatever,False
secondary_correct,True
dummy,False
supplementary_correct,True
woot,False
"""))])
assert report==expected_report
def test_getMaximumGtConf_no_gt_conf_columnRaisesKeyError(self):
report = Report([pd.DataFrame()])
with pytest.raises(KeyError):
report.get_maximum_gt_conf()
def test_getMaximumGtConf_emptyReportReturnsNaN(self):
report = Report([pd.DataFrame(data={"GT_CONF": []})])
actual = report.get_maximum_gt_conf()
assert math.isnan(actual)
def test_getMaximumGtConf_oneGTConfInReportReturnsGTConf(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5]})])
actual = report.get_maximum_gt_conf()
expected = 1.5
assert actual == expected
def test_getMaximumGtConf_threeGTConfsInReportReturnsHighest(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5, 10.5, 5.0]})])
actual = report.get_maximum_gt_conf()
expected = 10.5
assert actual == expected
def test_getMinimumGtConf_no_gt_conf_columnRaisesKeyError(self):
report = Report([pd.DataFrame()])
with pytest.raises(KeyError):
report.get_minimum_gt_conf()
def test_getMinimumGtConf_emptyReportReturnsNaN(self):
report = Report([pd.DataFrame(data={"GT_CONF": []})])
actual = report.get_minimum_gt_conf()
assert math.isnan(actual)
def test_getMinimumGtConf_oneGTConfInReportReturnsGTConf(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5]})])
actual = report.get_minimum_gt_conf()
expected = 1.5
assert actual == expected
def test_getMinimumGtConf_threeGTConfsInReportReturnsHighest(self):
report = Report([pd.DataFrame(data={"GT_CONF": [10.5, 5.0, 0.2]})])
actual = report.get_minimum_gt_conf()
expected = 0.2
assert actual == expected
class TestPrecisionReporter:
def test_init_gtconfIsExtractedCorrectly(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
dfs = pd.DataFrame(
data=[
create_precision_report_row(0.0, gt_conf=100),
create_precision_report_row(0.0, gt_conf=100),
create_precision_report_row(0.0, gt_conf=10),
create_precision_report_row(0.0, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([dfs])
actual = report.report.GT_CONF
expected = pd.Series([100.0, 100.0, 10.0, 100.0])
assert actual.equals(expected)
def test_fromFiles_TwoFilesReturnsValidRecallReport(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3; >GT_CONF=3; unmapped
"""
contents_2 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1260;IV=[50,60);PVID=4;NB_ALL=4;ALL_ID=4;NB_DIFF_ALL_SEQ=4;ALL_SEQ_ID=4; >CHROM=GC00000578_3;SAMPLE=CFT073;POS=165;IV=[25,29);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=3;GT_CONF=3.22199; primary_incorrect
CFT073 >CHROM=1;POS=1262;IV=[60,70);PVID=5;NB_ALL=5;ALL_ID=5;NB_DIFF_ALL_SEQ=5;ALL_SEQ_ID=5; >GT_CONF=5; unmapped
CFT073 >CHROM=1;POS=1281;IV=[70,80);PVID=6;NB_ALL=6;ALL_ID=6;NB_DIFF_ALL_SEQ=6;ALL_SEQ_ID=6; >GT_CONF=6; unmapped
"""
path_1 = create_tmp_file(contents_1)
path_2 = create_tmp_file(contents_2)
contents_1_input = StringIO(contents_1)
contents_2_input = StringIO(contents_2)
dataframes = [
pd.read_csv(contents_1_input, sep="\t", keep_default_na=False),
pd.read_csv(contents_2_input, sep="\t", keep_default_na=False),
]
actual = PrecisionReport.from_files([path_1, path_2])
expected = PrecisionReport(dataframes)
path_1.unlink()
path_2.unlink()
assert actual == expected
class TestRecallReport:
def test_fromFiles_TwoFilesReturnsValidRecallReport(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3; >GT_CONF=3; unmapped
"""
contents_2 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1260;IV=[50,60);PVID=4;NB_ALL=4;ALL_ID=4;NB_DIFF_ALL_SEQ=4;ALL_SEQ_ID=4; >CHROM=GC00000578_3;SAMPLE=CFT073;POS=165;IV=[25,29);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=3;GT_CONF=3.22199; primary_incorrect
CFT073 >CHROM=1;POS=1262;IV=[60,70);PVID=5;NB_ALL=5;ALL_ID=5;NB_DIFF_ALL_SEQ=5;ALL_SEQ_ID=5; >GT_CONF=5; unmapped
CFT073 >CHROM=1;POS=1281;IV=[70,80);PVID=6;NB_ALL=6;ALL_ID=6;NB_DIFF_ALL_SEQ=6;ALL_SEQ_ID=6; >GT_CONF=6; unmapped
"""
path_1 = create_tmp_file(contents_1)
path_2 = create_tmp_file(contents_2)
contents_1_input = StringIO(contents_1)
contents_2_input = StringIO(contents_2)
dataframes = [
pd.read_csv(contents_1_input, sep="\t", keep_default_na=False),
pd.read_csv(contents_2_input, sep="\t", keep_default_na=False),
]
actual = RecallReport.from_files([path_1, path_2])
expected = RecallReport(dataframes)
path_1.unlink()
path_2.unlink()
assert actual == expected
def test_init(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30; >GT_CONF=3; unmapped
"""
contents_1_input = StringIO(contents_1)
dataframes = [pd.read_csv(contents_1_input, sep="\t", keep_default_na=False)]
report = RecallReport(dataframes)
actual_df = report.report
expected_df = pd.read_csv(StringIO(
"""sample query_probe_header ref_probe_header classification GT_CONF PVID NB_ALL ALL_ID NB_DIFF_ALL_SEQ ALL_SEQ_ID NB_OF_SAMPLES good_eval
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10; >GT_CONF=1; unmapped 1.0 1 1 1 1 1 10 False
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct 60.1133 2 2 2 2 2 20 True
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30; >GT_CONF=3; unmapped 3.0 3 3 3 3 3 30 False
"""), sep="\t")
assert actual_df.equals(expected_df)
def test_checkIfOnlyBestMappingIsKept_hasPrimaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasSecondaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasSupplementaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True)])
        assert_frame_equal(actual, expected, check_dtype=False)
import os
import sys
from os import path
import argparse
import subprocess
#import logging
import threading
import time
from datetime import datetime
import shutil
import numpy as np
import pandas as pd
import win32com.client as win32
import pythoncom
from file_read_backwards import FileReadBackwards
import EFT_Tools as tools
class processEFTThread(threading.Thread):
def __init__(self, fileName, outdir, locations, years,
euroClasses=[99,0,1,2,3,4,5,6],
vehsplit="Alternative Technologies",
weights='all', techs='all',
keepTempFiles=False, saveFile=None, completed=None):
"""The threading class for processEFT."""
threading.Thread.__init__(self)
self.fileName = fileName
self.outdir = outdir
self.locations = locations
self.years = years
self.euroClasses = euroClasses
self.vehsplit = vehsplit
self.weights = weights
self.techs = techs
self.keepTempFiles = keepTempFiles
self.saveFile = saveFile
self.completed = completed
def run(self):
pythoncom.CoInitialize()
try:
processEFT(self.fileName, self.outdir, self.locations, self.years,
euroClasses=self.euroClasses,
vehsplit=self.vehsplit,
weights=self.weights,
techs=self.techs,
keepTempFiles=self.keepTempFiles,
saveFile=self.saveFile,
completed=self.completed)
except:
sys.excepthook(*sys.exc_info())
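# A hedged usage sketch for the thread wrapper above; the workbook path, output directory,
# locations and years are placeholders rather than values from a real run.
# worker = processEFTThread('EFT_2019.xlsb', 'output', ['Scotland'], [2025], euroClasses=[99])
# worker.start()
# worker.join()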
def processEFT(fileName, outdir, locations, years,
euroClasses=[99,0,1,2,3,4,5,6],
vehsplit="Alternative Technologies",
weights='all', techs='all',
keepTempFiles=False, saveFile=None, completed=None):
loggerM = logger.getChild('processEFT')
if type(years) is not list:
years = [years]
if type(locations) is not list:
locations = [locations]
if type(euroClasses) is not list:
euroClasses = [euroClasses]
if completed is None:
completed = pd.DataFrame(columns=['area', 'year', 'euro', 'tech', 'saveloc', 'busmode', 'weight'])
    # Get the files ready for processing.
ahk_ahkpathG, fileNames, versions, versionsForOutput = tools.prepareToExtract(fileName, locations)
fileName = fileNames[0]
version = versions[0]
versionForOutPut = versionsForOutput[0]
details = tools.versionDetails[version]
# Create the Excel Application object.
try:
excel = win32.gencache.EnsureDispatch('Excel.Application')
except TypeError:
time.sleep(5)
excel = win32.gencache.EnsureDispatch('Excel.Application')
excel.DisplayAlerts = False
# Make a temporary copy of the filename, so that we do no processing on the
# original. Just in case we break it. Also define temporary file names and
# output save locations, etc.
[oP, FN] = path.split(fileName)
tempdir = path.join(outdir, 'temp')
fileNameT = path.join(tempdir, FN)
try:
shutil.copyfile(fileName, fileNameT)
except PermissionError as err:
        # The file already exists and is open, probably because a previous run was cancelled or
        # failed without closing it. Check with the user and then close the Excel application.
QuitExcel = input(('It looks like the required excel document is already '
'open. Would you like python to quit all excel documents '
'and proceed?'))
if QuitExcel.lower() in ['yes', 'y']:
excel.Quit()
# Reopen it.
time.sleep(5)
excel = win32.gencache.EnsureDispatch('Excel.Application')
excel.DisplayAlerts = False
else:
raise err
NO2FEU = tools.readNO2Factors(mode='ByEuro')
NO2FRT = tools.readNO2Factors(mode='ByRoadType')
#first = True
tempFilesCreated = [fileNameT]
BusesOptions = [True, False]
techOptions = ['All']
if techs == 'all':
techOptions.extend(tools.euroClassTechnologies)
if weights == 'all':
weights = list(range(max(np.array(details['weightRowEnds']) - np.array(details['weightRowStarts'])) + 1))
weights.insert(0, 99)
else:
weights = [99]
vehsToSkipSt5 = ['Rigid HGV', 'Artic HGV', 'Bus and Coach',
'B100 Rigid HGV', 'B100 Artic HGV', 'B100 Bus',
'Hybrid Bus', 'B100 Coach']
# Check that all euro class names are understood.
if path.isfile(tools.ahk_exepath):
subprocess.Popen([tools.ahk_exepath, ahk_ahkpathG])
wb = excel.Workbooks.Open(fileNameT)
excel.Visible = True
tools.checkEuroClassesValid(wb, details['vehRowStartsMC'], details['vehRowEndsMC'],
tools.EuroClassNameColumnsMC, Type=1, logger=loggerM)
tools.checkEuroClassesValid(wb, details['vehRowStartsHB'], details['vehRowEndsHB'],
tools.EuroClassNameColumnsMC, Type=2, logger=loggerM)
tools.checkEuroClassesValid(wb, details['vehRowStarts'], details['vehRowEnds'],
tools.EuroClassNameColumns, Type=0, logger=loggerM)
wb.Close(True)
for loci, location in enumerate(locations):
loggerM.info(('{:02d} Beginning processing for location {} of '
'{}: "{}".').format(loci+1, loci+1, len(locations), location))
matchingRow = completed[(completed['area'] == location) &
(completed['year'] == -9) &
(completed['euro'] == -9) &
(completed['tech'] == 'NA') &
(completed['busmode'] == 'NA') &
(completed['weight'] == -9)].index.tolist()
if len(matchingRow) > 0:
loggerM.info(('{:02d} Processing for these '
'specifications has already been completed.').format(
loci+1))
continue
for yeari, year in enumerate(years):
loggerM.info(('{:02d} {:02d} Beginning processing for year {} of '
'{}: "{}".').format(loci+1, yeari+1, yeari+1, len(years), year))
# See if this is already completed.
matchingRow = completed[(completed['area'] == location) &
(completed['year'] == year) &
(completed['euro'] == -9) &
(completed['tech'] == 'NA') &
(completed['busmode'] == 'NA') &
(completed['weight'] == -9)].index.tolist()
if len(matchingRow) > 0:
loggerM.info(('{:02d} {:02d} Processing for these '
'specifications has already been completed.').format(
loci+1, yeari+1))
continue
for euroi, euroClass in enumerate(euroClasses):
loggerM.info(('{:02d} {:02d} {:02d} Beginning processing for '
'euroclass {} of {}: "{}".').format(loci+1, yeari+1, euroi+1,
euroi+1, len(euroClasses),
euroClass))
# See if this is already completed.
matchingRow = completed[(completed['area'] == location) &
(completed['year'] == year) &
(completed['euro'] == euroClass) &
(completed['tech'] == 'NA') &
(completed['busmode'] == 'NA') &
(completed['weight'] == -9)].index.tolist()
if len(matchingRow) > 0:
loggerM.info(('{:02d} {:02d} {:02d} Processing for these '
'specifications has already been completed.').format(
loci+1, yeari+1, euroi+1))
continue
if euroClass == 99:
# Euro class of euro 99 means use default mix, and default mix of tech.
loggerM.info(('{:02d} {:02d} {:02d} Euro class of 99 specifies using '
'default euro mix, and default tech.').format(loci+1, yeari+1, euroi+1))
techs = ['All']
else:
techs = techOptions
for techi, tech in enumerate(techs):
loggerM.info(('{:02d} {:02d} {:02d} {:02d} Beginning processing for technology '
'{} of {}: "{}".').format(loci+1, yeari+1, euroi+1, techi+1,
techi+1, len(techs), tech))
checkkill(excel)
# See if this is already completed.
matchingRow = completed[(completed['area'] == location) &
(completed['year'] == year) &
(completed['euro'] == euroClass) &
(completed['tech'] == tech) &
(completed['busmode'] == 'NA') &
(completed['weight'] == -9)].index.tolist()
if len(matchingRow) > 0:
completedfile = completed.loc[matchingRow[0]]['saveloc']
if completedfile == 'No File':
loggerM.info(('{:02d} {:02d} {:02d} {:02d} Processing for these '
'specifications has previously been skipped.').format(
loci+1, yeari+1, euroi+1, techi+1))
elif completedfile == 'MULTI':
loggerM.info(('{:02d} {:02d} {:02d} {:02d} Processing for these '
'specifications has already been completed.').format(
loci+1, yeari+1, euroi+1, techi+1))
loggerM.info(('{:02d} {:02d} {:02d} {:02d} Results saved in multiple files.').format(
loci+1, yeari+1, euroi+1, techi+1))
else:
[oP, FNC] = path.split(completedfile)
loggerM.info(('{:02d} {:02d} {:02d} {:02d} Processing for these '
'specifications has already been completed.').format(
loci+1, yeari+1, euroi+1, techi+1))
loggerM.info(('{:02d} {:02d} {:02d} {:02d} Results saved in {}.').format(
loci+1, yeari+1, euroi+1, techi+1, FNC))
continue
# Assign save locations.
#outputFileCSVinPrep = path.join(tempdir, '{}_{:04d}_{:02d}_{}_InPrep.csv'.format(location, year, euroClass, tech))
#outputFileCSV = path.join(outdir, '{}_{:04d}_{:02d}_{}.csv'.format(location, year, euroClass, tech))
#first = True
# Check to see if this technology is available for this euro class.
if tech not in tools.euroClassNameVariations[euroClass].keys():
loggerM.info('{:02d} {:02d} {:02d} {:02d} Not available for this euro class.'.format(loci+1, yeari+1, euroi+1, techi+1))
loggerM.info('{:02d} {:02d} {:02d} {:02d} SKIPPED (area, year, euro, tech, saveloc): {}, {}, {}, {}, {}.'.format(loci+1, yeari+1, euroi+1, techi+1, location, year, euroClass, tech, 'No File'))
continue
for doBus in BusesOptions:
if doBus is None:
BC = '--'
pass
elif doBus:
BC = 'BC'
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} Buses and coaches.'.format(loci+1, yeari+1, euroi+1, techi+1, BC))
if tech in ['c', 'd']:
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} Not available for technology {}.'.format(loci+1, yeari+1, euroi+1, techi+1, BC, tech))
continue
elif (euroClass in [5]) and (tech == 'Standard'):
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} Not applicable for technology {} for euro class {}.'.format(loci+1, yeari+1, euroi+1, techi+1, BC, tech, euroClass))
continue
else:
BC = 'NB'
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} All vehicles except buses and coaches.'.format(loci+1, yeari+1, euroi+1, techi+1, BC))
for weighti, weight in enumerate(weights):
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} {:02d} Beginning processing for weight row {} of {}.'.format(loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1, weighti+1, len(weights)))
# See if this is already completed.
matchingRow = completed[(completed['area'] == location) &
(completed['year'] == year) &
(completed['euro'] == euroClass) &
(completed['tech'] == tech) &
(completed['busmode'] == BC) &
(completed['weight'] == weighti+1)].index.tolist()
if len(matchingRow) > 0:
completedfile = completed.loc[matchingRow[0]]['saveloc']
[oP, FNC] = path.split(completedfile)
loggerM.info(('{:02d} {:02d} {:02d} {:02d} {} {:02d} Processing for these '
'specifications has already been completed.').format(
loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1))
loggerM.info(('{:02d} {:02d} {:02d} {:02d} {} {:02d} Results saved in {}.').format(
loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1, FNC))
continue
outputFileCSV = path.join(outdir, '{}_{:04d}_{:02d}_{}_{}_{}.csv'.format(location, year, euroClass, tech.replace(' ', '_'), BC, weighti))
checkkill(excel)
if weight == 99:
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} {:02d} Weight row 99 specifies using the default weight mix.'.format(loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1))
if doBus is None:
# Extract buses and coaches together, i.e. extract them, along
# with all other vehicles and don't treat them any differently.
vehs2Skip = ['Taxi (black cab)']
if (euroClass in [5]) and (tech == 'Standard'):
vehs2Skip = vehs2Skip + vehsToSkipSt5
excel, newSavedFile, b, k, weightclassnames, gotTechs = tools.prepareAndRun(
fileNameT, vehsplit, details, location, year, euroClass,
tools.ahk_exepath, ahk_ahkpathG, versionForOutPut,
tech=tech, sizeRow=weight, DoHybridBus=True, DoBusCoach=True,
excel=excel, vehiclesToSkip=vehs2Skip, logger=loggerM)
checkkill(excel)
if newSavedFile is None:
output = None
else:
tempFilesCreated.append(newSavedFile)
# Now get the output values as a dataframe.
output = tools.extractOutput(newSavedFile, versionForOutPut, year, location, euroClass, details, techDetails=[tech, gotTechs])
elif not doBus:
vehs2Skip = ['Taxi (black cab)', 'Bus and Coach', 'B100 Bus',
'CNG Bus', 'Biomethane Bus', 'Biogas Bus',
'Hybrid Bus', 'FCEV Bus', 'B100 Coach']
if (euroClass in [5]) and (tech == 'Standard'):
vehs2Skip = vehs2Skip + vehsToSkipSt5
# Extract all vehicles except buses and coaches.
excel, newSavedFile, b, k, weightclassnames, gotTechs = tools.prepareAndRun(
fileNameT, vehsplit, details, location, year, euroClass,
tools.ahk_exepath, ahk_ahkpathG, versionForOutPut,
tech=tech, sizeRow=weight, DoHybridBus=False, DoBusCoach=False,
excel=excel, vehiclesToSkip=vehs2Skip, logger=loggerM)
checkkill(excel)
if newSavedFile is None:
output = None
else:
tempFilesCreated.append(newSavedFile)
# Now get the output values as a dataframe.
output = tools.extractOutput(newSavedFile, versionForOutPut, year, location, euroClass, details, techDetails=[tech, gotTechs])
# Add weight details.
output['weight'] = 'None'
for vehclass, wcn in weightclassnames.items():
for vehclass2 in tools.in2outVeh[vehclass]:
output.loc[output.vehicle == vehclass2, 'weight'] = '{} - {}'.format(vehclass, wcn)
else:
# Extract only buses and coaches, and split them.
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} {:02d} Buses...'.format(loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1))
excel, newSavedFileBus, b, busCoachRatio, weightclassnames, gotTechsB = tools.prepareAndRun(
fileNameT, vehsplit, details, location, year, euroClass,
tools.ahk_exepath, ahk_ahkpathG, versionForOutPut,
tech=tech, sizeRow=weight, DoHybridBus=True, DoBusCoach=True,
DoMCycles=False, excel=excel, busCoach='bus', logger=loggerM)
checkkill(excel)
if newSavedFileBus is None:
gotBus = False
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} {:02d} No buses for this weight class.'.format(loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1))
else:
tempFilesCreated.append(newSavedFileBus)
outputBus = tools.extractOutput(newSavedFileBus, versionForOutPut, year, location, euroClass, details, techDetails=[tech, gotTechsB])
outputBus = outputBus.loc[[x in ['B100 Bus', 'Bus and Coach', 'Hybrid Bus'] for x in outputBus['vehicle']]]
outputBus.loc[outputBus.vehicle == 'Bus and Coach', 'vehicle'] = 'Bus'
outputBus['weight'] = 'Bus - {}'.format(weightclassnames['Bus'])
gotBus = True
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} {:02d} Coaches...'.format(loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1))
excel, newSavedFileCoa, b, busCoachRatio, weightclassnames, gotTechsC = tools.prepareAndRun(
fileNameT, vehsplit, details, location, year, euroClass,
tools.ahk_exepath, ahk_ahkpathG, versionForOutPut,
tech=tech, sizeRow=weight, DoHybridBus=False, DoBusCoach=True,
DoMCycles=False, excel=excel, busCoach='coach', logger=loggerM)
checkkill(excel)
if newSavedFileCoa is None:
gotCoach = False
loggerM.info('{:02d} {:02d} {:02d} {:02d} {} {:02d} No coaches for this weight class.'.format(loci+1, yeari+1, euroi+1, techi+1, BC, weighti+1))
else:
tempFilesCreated.append(newSavedFileCoa)
outputCoa = tools.extractOutput(newSavedFileCoa, versionForOutPut, year, location, euroClass, details, techDetails=[tech, gotTechsC])
outputCoa = outputCoa.loc[[x in ['B100 Coach', 'Bus and Coach'] for x in outputCoa['vehicle']]]
outputCoa.loc[outputCoa.vehicle == 'Bus and Coach', 'vehicle'] = 'Coach'
outputCoa['weight'] = 'Coach - {}'.format(weightclassnames['Coach'])
gotCoach = True
if gotBus and gotCoach:
output = | pd.concat([outputBus, outputCoa], axis=0) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 04:11:27 2017
@author: konodera
nohup python -u 501_concat.py &
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import multiprocessing as mp
import gc
import utils
utils.start(__file__)
#==============================================================================
# def
#==============================================================================
def user_feature(df, name):
if 'train' in name:
name_ = 'trainT-0'
elif name == 'test':
name_ = 'test'
df = pd.merge(df, pd.read_pickle('../feature/{}/f101_order.p'.format(name_)),# same
on='order_id', how='left')
# timezone
df = pd.merge(df, | pd.read_pickle('../input/mk/timezone.p') | pandas.read_pickle |
import copy
import inspect
import json
import os
import numpy as np
import pandas as pd
import pytest
from solarforecastarbiter.datamodel import Site, Observation
TEST_DATA_DIR = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
def site_dicts():
return [copy.deepcopy(site) for site in [
{
'name': 'site',
'latitude': 1,
'longitude': 1,
'elevation': 5,
'timezone': 'Etc/GMT+8',
'extra_parameters': {"network": "DOE ARM",
"network_api_id": 'qcradlong1',
"network_api_abbreviation": 'abbrv',
"observation_interval_length": 1},
},
{
'name': 'site2',
'latitude': 2,
'longitude': 2,
'elevation': 5,
'timezone': 'Etc/GMT+8',
'extra_parameters': {"network": "NOAA SURFRAD",
"network_api_id": 'some_id',
"network_api_abbreviation": 'abbrv',
"observation_interval_length": 5},
},
{
'name': 'site3',
'latitude': 3,
'longitude': -3,
'elevation': 6,
'timezone': 'Etc/GMT+8',
'extra_parameters': {"network": "NOAA SOLRAD",
"network_api_id": 'some_id',
"network_api_abbreviation": 'abbrv',
"observation_interval_length": 1},
},
{
'name': 'site4',
'latitude': 4,
'longitude': -5,
'elevation': 12,
'timezone': 'Etc/GMT+8',
'extra_parameters': {"observation_interval_length": 1,
"network": 'NREL MIDC',
"network_api_id": 'BMS',
"network_api_abbreviation": 'abbrv'},
},
{
'name': 'site4',
'latitude': 4,
'longitude': -5,
'elevation': 12,
'timezone': 'Etc/GMT+8',
'extra_parameters': {"observation_interval_length": 1,
"network": 'Unincorporated',
"network_api_id": 'BMS',
"network_api_abbreviation": 'abbrv'},
}
]]
def expected_site(site):
new_site = site.copy()
network = site['extra_parameters'].get('network', '')
new_site['name'] = f"{network} {site['name']}"
new_site.update({'extra_parameters': json.dumps(site['extra_parameters'])})
return new_site
site_string_dicts = [expected_site(site) for site in site_dicts()]
site_objects = [Site.from_dict(site) for site in site_string_dicts]
@pytest.fixture
def site_dicts_param():
return site_string_dicts
@pytest.fixture
def site_objects_param():
return site_objects
def site_to_obs(site):
ep = json.loads(site.extra_parameters)
interval_length = ep['observation_interval_length']
return Observation.from_dict({
'name': 'site ghi',
'variable': 'ghi',
'interval_label': 'ending',
'interval_value_type': 'interval_mean',
'interval_length': interval_length,
'site': site,
'uncertainty': 0,
'extra_parameters': site.extra_parameters
})
@pytest.fixture
def observation_objects_param(site_objects_param):
return [site_to_obs(site) for site in site_objects_param]
@pytest.fixture
def networks():
return ['DOE ARM', 'NOAA SURFRAD', 'NOAA SOLRAD', 'Unincorporated']
@pytest.fixture
def mock_api(mocker, site_objects_param, observation_objects_param):
api = mocker.MagicMock()
api.list_sites.return_value = site_objects_param
api.list_observations.return_value = observation_objects_param
return api
index = pd.date_range('20190101T1200Z', '20190101T1229Z',
freq='min', tz='UTC')
values = np.arange(100, 130)
@pytest.fixture
def start():
return | pd.Timestamp('20190101T1200Z') | pandas.Timestamp |
import time
import os
import sys
import scipy
import math
import laspy
import psutil
import pickle
import logging
import numpy as np
import pandas as ps
import scipy.linalg
import datetime
import multiprocessing
import matplotlib as plt
from scipy import spatial
from sklearn import metrics
from numpy import linalg as LA
from xgboost import XGBClassifier
from sklearn.neighbors import KDTree
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
def memcalc():
mem='RAM: '+str(psutil.virtual_memory()[2])+'%'
return mem
def cpucalc():
cpu='CPU: '+str(psutil.cpu_percent(interval=None, percpu=False))+'%'
return cpu
def setup_custom_logger(name):
class ContextFilter(logging.Filter):
def filter(self, record):
record.memcalc = memcalc()
record.cpucalc = cpucalc()
return True
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(memcalc)s %(cpucalc)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S' )
handler = logging.FileHandler('./logs/lOG_'+str(datetime.datetime.now()) +'.txt', mode='w')
handler.setFormatter(formatter)
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.addHandler(screen_handler)
logger.addFilter(ContextFilter())
return logger
logger = setup_custom_logger('myapp')
def neighbours(data, n=10):
'''tree = KDTree(data[:,:])
logger.info('KDTree built')
_, idx = tree.query(data[:,:], k=n)
return idx'''
tree = spatial.KDTree(data)
logger.info('KDTree built')
_, idx = tree.query(data, k=n)
return idx
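# Example (sketch): idx = neighbours(xyz, n=10) returns, for every point in
# xyz, the indices of its 10 nearest neighbours (the first one normally being
# the point itself, since the query is run against the same data).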
class featurecalculation:
def features(self,filename):
"""
INPUT :- LAS file name
OUTPUT :- A numpy array of size (no. of points , 22) consisting predefined features
"""
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1) # Create a multiprocessing Pool
logger.info("calculating neighbours")
result=pool.map(self.calc, range(division),chunksize=1) # process data_inputs iterable with pool
for divo in range(division):
if divo == (division - 1):
full_training_data[divo *maximum_points:] = result[divo][:][:]
else :
full_training_data[divo *maximum_points:(divo +1)*maximum_points] = result[divo][:][:]
logger.info(divo)
np.save('pointclouds/'+filename[:-4]+'_features' , full_training_data)
return
def calc(self,div):
# Calculating Feature for small point cloud with (maximum_points) no. of points
small_xyz = xyz[div*maximum_points:(div+1)*maximum_points]
small_data = data[div*maximum_points:(div+1)*maximum_points]
tree = spatial.KDTree(small_xyz)
_, idx = tree.query(small_xyz[:,:], k=10)
logger.info("Starting new Worker Process:%s",div)
medoid = []
for i in small_xyz[idx]:
d = scipy.spatial.distance.pdist(i)
d = scipy.spatial.distance.squareform(d)
medoid.append(np.argmin(d.sum(axis=0)))
covariance = []
for i in small_xyz[idx]:
covariance.append(np.cov(np.array(i).T))
covariance = np.array(covariance)
# Calculating Eigen Vectors and Eigen Values for each point
# w: eigen values , v: eigen vectors
w,v = LA.eigh(covariance)
w = [i/np.sum(i) for i in w]
w = np.array(w)
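# np.linalg.eigh returns eigenvalues in ascending order, so after the
# normalisation above w[:,0] <= w[:,1] <= w[:,2] and each row sums to 1;
# the geometric features below rely on this ordering.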
training_data = np.zeros((len(small_xyz),22))
# Calculating Geometric features for each point
training_data[:,0] = np.power(np.multiply(np.multiply(w[:,0], w[:,1]), w[:,2]), 1/3) #omnivariance
training_data[:,1] = -np.multiply(w[:,0], np.log(w[:,0]))-np.multiply(w[:,1], np.log(w[:,1]))-np.multiply(w[:,2], np.log(w[:,2])) #eigenentropy
training_data[:,2] = np.divide(w[:,2]-w[:,0], w[:,2]) #anistropy
training_data[:,3] = np.divide(w[:,1]-w[:,0], w[:,2]) #planarity
training_data[:,4] = np.divide(w[:,2]-w[:,1], w[:,2]) #linearity
training_data[:,5] = w[:,0] #surface variation
training_data[:,6] = np.divide(w[:,0], w[:,2]) #scatter
training_data[:,7] = 1-abs(v[:,0,2]) #verticality
temp = []
for i in range(len(small_xyz)):
temp.append(np.subtract(small_xyz[idx[i]],small_xyz[idx[medoid[i]]]))
# Calculating Central Moments and height feature for each point
moment11 = [] #moment 1st order 1st axis
moment12 = [] #moment 1st order 2nd axis
moment21 = [] #moment 2nd order 1st axis
moment22 = [] #moment 2nd order 2nd axis
vertical_range = [] #vertical range
height_below = [] #height below
for i in range(len(small_xyz)):
moment11.append(np.sum(np.dot(temp[i], v[i][2])))
moment12.append(np.sum(np.dot(temp[i], v[i][1])))
moment21.append((np.sum(np.dot(temp[i], v[i][2]))**2))
moment22.append((np.sum(np.dot(temp[i], v[i][1]))**2))
vertical_range.append((np.amax(small_xyz[idx[i]],axis=0))[2] - (np.amin(small_xyz[idx[i]],axis=0))[2])
height_below.append(small_xyz[i][2] - (np.amin(small_xyz[idx[i]],axis=0))[2])
training_data[:,8] = np.array(moment11)
training_data[:,9] = np.array(moment12)
training_data[:,10] = np.array(moment21)
training_data[:,11] = np.array(moment22)
training_data[:,12] = np.array(vertical_range)
training_data[:,13] = np.array(height_below)
moment11,moment12,moment21,moment22,temp = None,None,None,None,None
#height above
vertical_range = np.array(vertical_range)
height_below = np.array(height_below)
height_above = vertical_range - height_below
training_data[:,14] = np.array(height_above)
vertical_range,height_above,height_below = None,None,None
# Calculating Color features for each points
rgb2hsv = plt.colors.rgb_to_hsv((small_data[:,3:6]).astype('uint8'))
training_data[:,15:18] = np.array(rgb2hsv)
nbr_color = []
for i in range(len(small_xyz)):
nbr_color.append(np.sum(rgb2hsv[idx[i]], axis=0))
nbr_color = np.array(nbr_color)
nbr_color = nbr_color/10
training_data[:,18:21] = np.array(nbr_color)
nbr_color = None
rgb2hsv = None
y = small_data[:,6]
training_data[:,21] = np.array(y)
return training_data
for image_file_name in os.listdir('pointclouds'):
if image_file_name.endswith(".las"):
filename = image_file_name
if not(os.path.exists("pointclouds/"+filename[:-4]+"_features.npy")):
maximum_points = 50000
infile = laspy.file.File("pointclouds/"+filename, mode='rw')
print("Starting "+filename+" feature calculation")
col = {'x':infile.x, 'y':infile.y, 'z':infile.z, 'r':infile.red/256, 'g':infile.green/256, 'b':infile.blue/256, 'c':infile.classification}
data = | ps.DataFrame(data=col) | pandas.DataFrame |
import streamlit as st
from collections import defaultdict
from kafka import KafkaConsumer
from json import loads
import time
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import PIL
from PIL import Image
import streamlit.components.v1 as components
import os
import tweepy
import logging
import sys
from collections import deque
from geopy.geocoders import Nominatim
import threading
import pickle
# Streamlit layout CSS
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
max-width: 100vw;
padding-top: 1rem;
padding-right: 1rem;
padding-left: 1rem;
padding-bottom: 1rem;
}}
.reportview-container .main {{
color: black;
background-color: white;
}}
</style>
""",
unsafe_allow_html=True,
)
# Lambdas and Constants
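# normalize: epsilon-stabilised z-score; timestamp_seconds: ISO-8601 string -> POSIX seconds.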
def normalize(x): return (x - np.mean(x) + np.finfo(x.dtype).eps) / (np.std(x) + np.finfo(x.dtype).eps)
def timestamp_seconds(x): return datetime.fromisoformat(x).timestamp()
wave_dict = defaultdict(list)
pick_dict = defaultdict(list)
event_dict = defaultdict(dict)
EVENT_MIN_GAP = 5
WINDOW_LENGTH = 100
WINDOW_NUMBER = 60
HOP_LENGTH = 10
REFRESH_SEC = 1.0
MAP_WIDTH = 900
MAP_HEIGHT = 650
MAP_ZOOM = 9
BOT_MAGNITUDE_THRESHOLD = 1.5
GEOLOC_TOUT = 5 # in seconds
I_MADE_A_TWEET = False
dt = 0.01
prev_event_bundle = None
prev_event_bundle = (0.0, 0.0, 0.0, 0.0)
CONFIG_PKL = "config_hawaii.pkl"
STATION_CSV = "stations_hawaii.csv"
with open(CONFIG_PKL, "rb") as fp:
CONFIG = pickle.load(fp)
STATIONS = pd.read_csv(STATION_CSV, delimiter="\t")
STATIONS = STATIONS.rename(columns={"station":"id"})
NUM_STATION = len(STATIONS)
consumer = None
# Connection to Kafka
try:
print('Connecting to k8s kafka')
BROKER_URL = 'quakeflow-kafka:9092'
consumer = KafkaConsumer(
bootstrap_servers=[BROKER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
key_deserializer=lambda x: loads(x.decode('utf-8')),
value_deserializer=lambda x: loads(x.decode('utf-8'))
)
print('k8s kafka connection success!')
consumer.subscribe(['waveform_raw', 'phasenet_picks', 'gmma_events'])
except BaseException:
print('k8s Kafka connection error')
try:
print('Connecting to local kafka')
BROKER_URL = 'localhost:9092'
consumer = KafkaConsumer(
bootstrap_servers=[BROKER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
key_deserializer=lambda x: loads(x.decode('utf-8')),
value_deserializer=lambda x: loads(x.decode('utf-8'))
)
print('local kafka connection success!')
consumer.subscribe(['waveform_raw', 'phasenet_picks', 'gmma_events'])
except BaseException:
print('local Kafka connection error')
if not consumer:
print('No kafka server found!')
# Setting up Tweepy
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
access_token = os.getenv('ACCESS_TOKEN')
access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')
print(consumer_key)
print(consumer_secret)
print(access_token)
print(access_token_secret)
logger = logging.getLogger()
def create_api():
consumer_key = os.getenv("CONSUMER_KEY")
consumer_secret = os.getenv("CONSUMER_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")
if not consumer_key:
return
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
api.verify_credentials()
logger.info("API created")
return api
except Exception as e:
logger.error("Error creating API", exc_info=True)
return None
api = create_api()
# Functions
def latlon2address(lat, lon, geolocator):
try:
location = geolocator.reverse(f"{lat}, {lon}")
print(location)
return location.address
except BaseException:
return None
geolocator = Nominatim(user_agent="https", timeout=5)
def update_figure_layout(figure):
figure.update_layout(
mapbox_style="white-bg",
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
"sourceattribution": "United States Geological Survey",
"source": [
"https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}"
]
}
])
figure.update_layout(
showlegend=True,
width=MAP_WIDTH,
height=MAP_HEIGHT,
geo=dict(
landcolor='rgb(217, 217, 217)',
lonaxis=dict(
showgrid=True,
gridwidth=0.05,
range=CONFIG["xlim_degree"],
dtick=5
),
lataxis=dict(
showgrid=True,
gridwidth=0.05,
range=CONFIG["ylim_degree"],
dtick=5
)
),
)
figure.update_layout(margin={"r": 0.5, "t": 0.5, "l": 0, "b": 0})
return figure
def get_plot_picks(message, t0, tn):
t0_idx = 0
t_picks = []
colors = []
for i, x in enumerate(message):
if timestamp_seconds(x["timestamp"]) >= t0:
if t0_idx == 0:
t0_idx = i
if timestamp_seconds(x["timestamp"]) <= tn:
t_picks.append(timestamp_seconds(x["timestamp"]) - t0)
if x["type"] == "p":
colors.append("b")
elif x["type"] == "s":
colors.append("r")
else:
raise ValueError("Phase type error!")
else:
return t_picks, colors, t0_idx
return t_picks, colors, t0_idx
def get_plot_events(message, t0, tn):
t0_idx = 0
t_events = []
mag_events = []
loc_events = []
for k, x in message.items():
if timestamp_seconds(x["time"]) >= t0:
# if t0_idx == 0:
# t0_idx = i
if timestamp_seconds(x["time"]) <= tn - 8:
t_events.append(timestamp_seconds(x["time"]) - t0)
mag_events.append(x["magnitude"])
loc_events.append(x["location"])
else:
return t_events, mag_events, loc_events, t0_idx
return t_events, mag_events, loc_events, t0_idx
def update_figure(figure, lat_list, lng_list, z_list, mag_events, t_events):
if(figure is not None):
figure.data = []
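# Marker size grows with magnitude**4 (scaled by 3.5) so that larger events
# stand out on the map; the hover box shows magnitude, time and coordinates.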
figure_df = pd.DataFrame({'lat': lat_list, 'lon': lng_list, 'z': z_list, 'mag': mag_events,
'time': t_events, 'size': [(mag_event**4) / 3.5 for mag_event in mag_events]})
figure = px.scatter_mapbox(
figure_df,
lat="lat",
lon="lon",
hover_data=[
"mag",
"time",
"lat",
"lon"],
size="size",
color_discrete_sequence=["fuchsia"],
zoom=MAP_ZOOM,
height=300)
figure = update_figure_layout(figure)
return figure
def update_figure_with_cols(figure, col1, col2, lat_list, lng_list, z_list, mag_events, t_events):
with col1:
figure = update_figure(figure, lat_list, lng_list, z_list, mag_events, t_events)
return figure
def tweep_update_with_media(api, mag, lng, lat, z, event_time, geolocator):
temp_time = time.time()
# get figure using update_figure
figure = update_figure(None, [lat], [lng], [z], [mag], [event_time])
figure.write_image("twitter_fig.png")
print("Time taken to render: %f" % (time.time() - temp_time))
address = latlon2address(lat, lng, geolocator)
if address is not None:
caption = f"Magnitude {mag} earthquake occurred at address {address} at time {event_time}"
else:
caption = "Magnitude %f earthquake happened at longitude %f degrees, latitude %f degrees at depth %f km at time %s" % (
mag, lng, lat, z, event_time)
try:
api.update_with_media("twitter_fig.png", caption)
print('Update Twitter with media success!', flush=True)
global I_MADE_A_TWEET
I_MADE_A_TWEET = True # Demo purpose, don't want to use up all the Twitter API Quota
print("Time taken to from start to end to fully upload to twitter: %f" % (time.time() - temp_time))
except BaseException:
pass
def tweepy_status_update(event_dict):
if(len(event_dict) > 0):
event = list(event_dict.values())[-1]
print("tweepy_status_update (event): ", event)
event_time = event['time']
lng = lng_from_x(event['location'][0])
lat = lat_from_y(event['location'][1])
z = event['location'][2]
mag = event['magnitude']
bundle = (lng, lat, z, mag)
global prev_event_bundle
if(bundle != prev_event_bundle):
print("----------New Event----------")
prev_event_bundle = bundle
if mag > BOT_MAGNITUDE_THRESHOLD and api is not None and not I_MADE_A_TWEET:
print("time is %s, current time is %f" % (event_time, time.time()))
print("Try to update status on twitter............")
print("Magnitude %f earthquake happened at longitude %f, latitude %f at depth %f at time %s" % (mag, lng, lat, z, event_time))
upload_thread = threading.Thread(
target=tweep_update_with_media, name="Uploader", args=(
api, mag, lng, lat, z, event_time, geolocator, ))
upload_thread.start()
temp_time = time.time()
# Pure text upload, will be fast
# api.update_status(
# "Magnitude %f earthquake happened at longitude %f, latitude %f at depth %f at time %s" %
# (mag, lng, lat, z, event_time))
print("Time taken for fast alert: %f" % (time.time() - temp_time)) # It took: 0.161690 seconds
def extract_df_from_event_dict(event_dict):
event_dict_values = list(event_dict.values())
event_dict_values.reverse()
lat_values = []
lon_values = []
z_values = []
mag_values = []
time_values = []
for event in event_dict_values:
lon_values.append(lng_from_x(event['location'][0]))
lat_values.append(lat_from_y(event['location'][1]))
z_values.append(event['location'][2])
mag_values.append(event['magnitude'])
time_values.append(event['time'])
event_dict_df = pd.DataFrame({'Magnitude': mag_values, 'Time': time_values, 'Latitude (deg)': lat_values,
'Longitude (deg)': lon_values, 'Depth (km)': z_values})
return event_dict_df
# Page header
image_data = np.asarray(Image.open('quakeflow logo design 2.jpg'))
st.image(image_data, caption=None, width=None, use_column_width=None, clamp=False, channels='RGB', output_format='auto')
st.balloons()
# Streamlit layout
col1, col2 = st.beta_columns([1, 1])
# Initial plotting
with col1:
experimental_df = | pd.DataFrame({'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []}) | pandas.DataFrame |
import pandas as pd
from pandas.io.json import json_normalize
from TweetsToDB.TweetModel import Tweet
import json
#Need to create a dataframe in order to compute stats
def statTweets(jsonTweet):
options = ['tweetLikes', 'tweetRe', 'tweetTextCount']
formatted_options = ['Likes','Retweets', 'Character Count']
normalTweet = | json_normalize(jsonTweet) | pandas.io.json.json_normalize |
"""
Author: <NAME>, Phd Student @ Ishida Laboratory, Department of Computer Science, Tokyo Institute of Technology
Created on: February 21st, 2020
Description: This file contains necessary functions for the generation and splitting of the raw original dataset.
"""
import os
import random
import numpy as np
import pandas as pd
from collections import Counter
from tqdm import tqdm
from chemistry_methods.reactions import parse_reaction_roles
from chemistry_methods.fingerprints import construct_ecfp, construct_hsfp
from chemistry_methods.reaction_analysis import extract_info_from_reaction, extract_info_from_molecule
from chemistry_methods.reaction_cores import get_reaction_core_atoms, get_separated_cores
from chemistry_methods.molecules import get_atom_environment, get_bond_environment
from data_methods.helpers import get_n_most_frequent_rows, encode_one_hot
def generate_unique_compound_pools(args):
""" Generates and stores unique (RDKit Canonical SMILES) chemical compound pools of the reactants and products for a
chemical reaction dataset. The dataset needs to contain a column named 'rxn_smiles' in which the values for the
mapped reaction SMILES strings are stored. """
reactant_pool_smiles, product_pool_smiles, reactant_pool_mol, product_pool_mol = [], [], [], []
reactant_reaction_class, product_reaction_class = [], []
# Read the raw original chemical reaction dataset.
raw_dataset = | pd.read_csv(args.dataset_config.raw_dataset) | pandas.read_csv |
import pandas as pd
import ast
import json
from psutil import test
from torch.utils import data
from transformers import BertTokenizerFast as fast_tokenizer
from transformers import AutoTokenizer
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import sys
from transformers.utils.dummy_pt_objects import TransfoXLLMHeadModel
sys.path.append('/pvc/')
from src.utils.codiespDataset import *
from src.baselines.spanish_bert import SpanishBertBaseline
from pytorch_lightning.utilities.seed import seed_everything
def preprocessing_for_bert(data, label_to_pos, tokenizer, language):
"""Perform required preprocessing steps for pretrained BERT.
@param data (np.array): Array of texts to be processed.
@return input_ids (torch.Tensor): Tensor of token ids to be fed to a model.
@return attention_masks (torch.Tensor): Tensor of indices specifying which
tokens should be attended to by the model.
"""
# Create empty lists to store outputs
input_ids = []
attention_masks = []
# For every sentence...
#if len(data.notes.shape) == 2 :
# data.columns = ['patient_id', 'label', 'notes', 'translated_notes', 'official_translation_notes']
sent = data.notes.tolist()
#for sent in data.notes.tolist():
# `encode_plus` will:
# (1) Tokenize the sentence
# (2) Add the `[CLS]` and `[SEP]` token to the start and end
# (3) Truncate/Pad sentence to max length
# (4) Map tokens to their IDs
# (5) Create attention mask
# (6) Return a dictionary of outputs
#use batch encode plus to process batch by batch and not per sentence
#pytorch dataset class allows different lenghts of tokens
encoded_sent = tokenizer.batch_encode_plus(
batch_text_or_text_pairs=sent, # Preprocess sentence
add_special_tokens=True, # Add `[CLS]` and `[SEP]`
max_length=512, # Max length to truncate/pad
pad_to_max_length=True, # Pad sentence to max length
#return_tensors='pt', # Return PyTorch tensor
return_attention_mask=True, # Return attention mask
#padding='longest',
truncation=True,
return_token_type_ids=False,
return_tensors='pt')
# Add the outputs to the lists
input_ids = encoded_sent.get('input_ids')
attention_masks = encoded_sent.get('attention_mask')
# Convert lists to tensors
#input_ids = torch.tensor(input_ids)
#attention_masks = torch.tensor(attention_masks)
label_tensor = label_to_tensor(data, label_to_pos)
#label_tensor = label_to_index(data, label_to_pos)
dataset = TensorDataset(input_ids, attention_masks, label_tensor)
return dataset
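# Example usage (sketch; assumes train_df, label_to_pos and a loaded tokenizer
# are already available):
# train_set = preprocessing_for_bert(train_df, label_to_pos, tokenizer, 'spanish')
# train_loader = DataLoader(train_set, sampler=RandomSampler(train_set), batch_size=16)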
def label_to_index(data, label_to_pos):
def row_to_pos(row):
tmp = list()
for i, code in enumerate(row):
tmp.append(label_to_pos[code])
return tmp
return torch.tensor(data['labels'].apply(row_to_pos))
def label_to_tensor(data, label_to_pos):
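# Builds a multi-hot target matrix of shape (n_documents, n_labels); codes
# missing from label_to_pos are silently skipped (see the except below).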
tmp = np.zeros((len(data),
len(label_to_pos))
)
c = 0
test_me = list()
for idx, row in data.iterrows():
for code in row['labels']:
try:
tmp[c, label_to_pos[code]] = 1
test_me.append(code)
except:
#print('WARNING Number of labels you are not using the english_spanish label filter')
pass
c += 1
return torch.tensor(tmp)
def set_seeds(seed):
import random
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
seed_everything(seed=seed)
def load_CodieSp_data(task, language, is_cutoff=True, is_cluster=True,):
if is_cluster:
prefix = 'pvc'
else:
prefix = 'tmp'
test_data = pd.DataFrame()
assert task in ["diagnosis", "procedure"] and language in ['english', 'spanish']
path = f'/{prefix}/codiesp/xl-training-data/v4/{task}_task/{language}_clinical_text'
if is_cutoff:
train_data = pd.read_csv(f'/{path}/train_data_cutoff.csv')
train_data.labels = train_data.labels.apply(lambda row: ast.literal_eval(row))
dev_data = pd.read_csv(f'/{path}/dev_data_cutoff.csv')
dev_data.label = dev_data.label.apply(lambda row: ast.literal_eval(row))
try:
test_data = pd.read_csv(f'/{path}/test_data_cutoff.csv')
test_data.label = test_data.label.apply(lambda row: ast.literal_eval(row))
except:
print("test_data_cutoff is not splitted in create_dataset.py because no test set is used")
with open(f'/{path}/all_codes_cutoff.json') as f:
labels = json.load(f)['all_D_codes']
else:
train_data = pd.read_csv('/{}/train_data.csv'.format(path))
train_data.label = train_data.label.apply(lambda row: ast.literal_eval(row))
dev_data = pd.read_csv('/{}/dev_data.csv'.format(path))
dev_data.label = dev_data.label.apply(lambda row: ast.literal_eval(row))
try:
test_data = pd.read_csv('/{}/test_data.csv'.format(path))
test_data.label = test_data.label.apply(lambda row: ast.literal_eval(row))
test_data = test_data.loc[test_data.label.apply(len) > 0]
except:
print("test_data is not splitted in create_dataset.py because no test set is used")
with open('/{}/all_codes.json'.format(path)) as f:
labels = json.load(f)['all_D_codes']
train_data = train_data.loc[train_data.label.apply(len) > 0]
dev_data = dev_data.loc[dev_data.label.apply(len) > 0]
return train_data, dev_data, test_data, labels
def load_CodieSp_datav2(task, language, is_cutoff=True, is_cluster=True,):
if is_cluster:
prefix = 'pvc'
else:
prefix = 'tmp'
test_data = pd.DataFrame()
assert task in ["diagnosis", "procedure"] and language in ['english', 'spanish']
path = f'/{prefix}/codiesp/xl-training-data/v4/{task}_task/rebalanced/{language}_clinical_text'
if is_cutoff:
train_data = | pd.read_csv(f'/{path}/{language}_train_data_cutoff.csv') | pandas.read_csv |
import numpy as np
from .base import EvaluationMethod
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
class TemporalMetric(EvaluationMethod):
def __init__(self, metric, label=None):
super(TemporalMetric, self).__init__()
self.metric = metric
self.ts = []
self.label = label
self.no_groundtruth = True
def evaluate_both(self, i, LR, SR, HR):
self.ts.append(self.metric(SR,HR))
def finalize(self):
ts = np.concatenate(self.ts, axis=0)
np.save(self.dir / 'losses.npy', ts, allow_pickle=False)
def summarize(self, paths, outdir):
data = {name: np.load(paths[name] / 'losses.npy') for name in paths}
C = data[list(data.keys())[0]].shape[-1]
with sns.plotting_context('paper'), sns.axes_style('whitegrid'), sns.color_palette('deep'):
label = self.label if self.label else 'loss'
for c in range(C):
df = | pd.DataFrame.from_dict({name: data[name][..., c] for name in data}) | pandas.DataFrame.from_dict |
# pylint: disable=W0612,E1101
from datetime import datetime
import os
import operator
import unittest
import numpy as np
from pandas.core.api import DataFrame, Index, notnull
from pandas.core.datetools import bday
from pandas.core.frame import group_agg
from pandas.core.panel import WidePanel, LongPanel, pivot
import pandas.core.panel as panelmod
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.core.panel as panelm
import pandas.util.testing as common
class PanelTests(object):
panel = None
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
class SafeForLongAndSparse(object):
def test_repr(self):
foo = repr(self.panel)
def test_iter(self):
common.equalContents(list(self.panel), self.panel.items)
def _check_statistic(self, frame, name, alternative):
f = getattr(frame, name)
for i, ax in enumerate(['items', 'major', 'minor']):
result = f(axis=i)
assert_frame_equal(result, frame.apply(alternative, axis=ax))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_statistic(self.panel, 'count', f)
def test_sum(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.sum()
self._check_statistic(self.panel, 'sum', f)
def test_prod(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return np.prod(nona)
self._check_statistic(self.panel, 'prod', f)
def test_mean(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].mean()
self._check_statistic(self.panel, 'mean', f)
def test_median(self):
def f(x):
x = np.asarray(x)
return np.median(x[notnull(x)])
self._check_statistic(self.panel, 'median', f)
def test_min(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.min()
self._check_statistic(self.panel, 'min', f)
def test_max(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.max()
self._check_statistic(self.panel, 'max', f)
def test_var(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.var(ddof=1)
self._check_statistic(self.panel, 'var', f)
def test_std(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.std(ddof=1)
self._check_statistic(self.panel, 'std', f)
def test_skew(self):
return
try:
from scipy.stats import skew
except ImportError:
return
def f(x):
x = np.asarray(x)
return skew(x[notnull(x)], bias=False)
self._check_statistic(self.panel, 'skew', f)
class SafeForSparse(object):
@staticmethod
def assert_panel_equal(x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert(self.panel._get_axis(0) is self.panel.items)
assert(self.panel._get_axis(1) is self.panel.major_axis)
assert(self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
self.panel.items = new_items
self.assert_(self.panel.items is new_items)
self.panel.major_axis = new_major
self.assert_(self.panel.major_axis is new_major)
self.panel.minor_axis = new_minor
self.assert_(self.panel.minor_axis is new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.div)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
common.equalContents(self.panel.keys(), self.panel.items)
def test_iteritems(self):
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
check_op(operator.add, 'add')
check_op(operator.sub, 'subtract')
check_op(operator.mul, 'multiply')
check_op(operator.div, 'divide')
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
class TestWidePanel(unittest.TestCase, PanelTests,
SafeForLongAndSparse,
SafeForSparse):
@staticmethod
def assert_panel_equal(x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = common.makeWidePanel()
common.add_nans(self.panel)
def test_constructor(self):
# with BlockManager
wp = WidePanel(self.panel._data)
self.assert_(wp._data is self.panel._data)
wp = WidePanel(self.panel._data, copy=True)
self.assert_(wp._data is not self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled prop
wp = WidePanel([[['foo', 'foo', 'foo',],
['foo', 'foo', 'foo']]])
self.assert_(wp.values.dtype == np.object_)
vals = self.panel.values
# no copy
wp = WidePanel(vals)
self.assert_(wp.values is vals)
# copy
wp = WidePanel(vals, copy=True)
self.assert_(wp.values is not vals)
def test_constructor_cast(self):
casted = WidePanel(self.panel._data, dtype=int)
casted2 = WidePanel(self.panel.values, dtype=int)
exp_values = self.panel.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [['foo', 'bar', 'baz']]
self.assertRaises(ValueError, DataFrame, data, dtype=float)
def test_consolidate(self):
self.assert_(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assert_(not self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assert_(panel._data.is_consolidated())
def test_from_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A' : itema, 'B' : itemb[5:]}
d2 = {'A' : itema._series, 'B' : itemb[5:]._series}
d3 = {'A' : DataFrame(itema._series),
'B' : DataFrame(itemb[5:]._series)}
wp = WidePanel.from_dict(d)
wp2 = WidePanel.from_dict(d2) # nested Dict
wp3 = WidePanel.from_dict(d3)
self.assert_(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = WidePanel.from_dict(d, intersect=True)
self.assert_(wp.major_axis.equals(itemb.index[5:]))
def test_from_dict_mixed(self):
pass
def test_values(self):
self.assertRaises(Exception, WidePanel, np.random.randn(5, 5, 5),
range(5), range(5), range(4))
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assert_('ItemA' not in self.panel.items)
del self.panel['ItemB']
self.assert_('ItemB' not in self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = WidePanel(values, range(3), range(3), range(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA']).to_long()
self.panel['ItemE'] = lp
lp = self.panel.filter(['ItemA', 'ItemB']).to_long()
self.assertRaises(Exception, self.panel.__setitem__,
'ItemE', lp)
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index,
columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = 1
# object dtype
self.panel['ItemQ'] = 'foo'
self.assert_(self.panel['ItemQ'].values.dtype == np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assert_(self.panel['ItemP'].values.dtype == np.bool_)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert(conformed.index.equals(self.panel.major_axis))
assert(conformed.columns.equals(self.panel.minor_axis))
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel.reindex,
major_axis=new_major, major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis)
assert(result.items is self.panel.items)
assert(result.major_axis is self.panel.major_axis)
assert(result.minor_axis is self.panel.minor_axis)
self.assertRaises(Exception, self.panel.reindex)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis,
method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assert_(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
def test_combinePanel_with_long(self):
lng = self.panel.to_long(filter_observations=False)
result = self.panel.add(lng)
self.assert_panel_equal(result, self.panel * 2)
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
assert_series_equal(xs['ItemA'], ref.xs(idx))
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assert_(xs['ItemA'].dtype == np.float64)
self.assert_(xs['ItemD'].dtype == np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx])
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assert_(xs['ItemA'].dtype == np.float64)
self.assert_(xs['ItemD'].dtype == np.object_)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assert_(result.items is self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assert_(result.items is self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assert_(result.major_axis is self.panel.minor_axis)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assert_(result.items is self.panel.major_axis)
# swapping an axis with itself should raise
self.assertRaises(Exception, self.panel.swapaxes, 'items', 'items')
def test_to_long(self):
# filtered
filtered = self.panel.to_long()
# unfiltered
unfiltered = self.panel.to_long(filter_observations=False)
assert_panel_equal(unfiltered.to_wide(), self.panel)
def test_to_long_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_long()
wp = lp.to_wide()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
assert_frame_equal(wp['bool'], panel['bool'])
def test_filter(self):
pass
def test_apply(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx),
shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx),
shifted.minor_xs(idx_lag))
self.assertRaises(Exception, self.panel.shift, 1, axis='items')
class TestLongPanel(unittest.TestCase):
def setUp(self):
panel = common.makeWidePanel()
common.add_nans(panel)
self.panel = panel.to_long()
self.unfiltered_panel = panel.to_long(filter_observations=False)
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_almost_equal(unpickled['ItemA'].values,
self.panel['ItemA'].values)
def test_len(self):
len(self.unfiltered_panel)
def test_constructor(self):
pass
def test_fromRecords_toRecords(self):
# structured array
K = 10
recs = np.zeros(K, dtype='O,O,f8,f8')
recs['f0'] = range(K / 2) * 2
recs['f1'] = np.arange(K) / (K / 2)
recs['f2'] = np.arange(K) * 2
recs['f3'] = np.arange(K)
lp = LongPanel.fromRecords(recs, 'f0', 'f1')
self.assertEqual(len(lp.items), 2)
lp = LongPanel.fromRecords(recs, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
torecs = lp.toRecords()
self.assertEqual(len(torecs.dtype.names), len(lp.items) + 2)
# DataFrame
df = DataFrame.from_records(recs)
lp = LongPanel.fromRecords(df, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
# dict of arrays
series = DataFrame.from_records(recs)._series
lp = | LongPanel.fromRecords(series, 'f0', 'f1', exclude=['f2']) | pandas.core.panel.LongPanel.fromRecords |
import json
import django
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'carebackend.settings'
sys.path.append(os.path.dirname(__file__) + '/..')
django.setup()
from places.models import Neighborhood, NeighborhoodEntry, Place, Area
from django.contrib.gis.geos import Polygon
import pandas as pd
from shapely.geometry import Polygon as ShapelyPolygon
fl = sys.argv[1]
area_to_use = sys.argv[2]
insert_if_not_found = sys.argv[3] == 'yes' if len(sys.argv) > 3 else False
area = Area.objects.get(key=area_to_use)
df = | pd.read_csv(fl) | pandas.read_csv |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
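# DummyAlgo records whether it was called and returns a fixed value, which lets
# the AlgoStack test below verify that evaluation stops at the first False.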
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo = algos.SelectAll(include_no_data=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[0]] = np.nan
data['c1'].ix[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('bt.ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].ix[dts[1]] = 105
data['c1'].ix[dts[2]] = 95
data['c1'].ix[dts[3]] = 105
data['c1'].ix[dts[4]] = 95
# low vol c2
data['c2'].ix[dts[1]] = 100.1
data['c2'].ix[dts[2]] = 99.9
data['c2'].ix[dts[3]] = 100.1
data['c2'].ix[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('bt.ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].ix[dts[2]] = 105
data['c2'].ix[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
    dts = pd.date_range('2010-01-01', periods=3)
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 9 17:37:42 2016
@author: noore
"""
from bigg import BiGG
from kegg import KEGG
import settings
import cache
import colorsys
import sys
from distutils.util import strtobool
import pandas as pd
import os
import json
import seaborn as sns
import numpy as np
from scipy.stats import gmean, ranksums
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.gridspec import GridSpec
from matplotlib import rcParams
import pdb # this is a reminder for Elad not to remove this pdb import
from topology import calculate_distances
import itertools
sns.set('paper', style='white')
ORGANISM = 'Escherichia coli'
STAT_TABLE_INDEX = ['all entries',
'keeping only E. coli data',
'filtering out data about mutated enzymes',
'keeping only data mapping to BiGG model',
'unique metabolite-enzyme pairs',
'unique metabolites',
'unique enzymes']
N_ACT_LABEL = 'Number of activating interactions'
N_INH_LABEL = 'Number of inhibiting interactions'
CONDITIONS = ['Glucose', 'Fructose', 'Galactose', 'Gluconate', 'Mannitol',
'Sorbitol', 'Mannose', 'Glycerol', 'Pyruvate', 'Lactate',
'Acetate', 'Succinate', 'glcNAc']
GENERAL_COLOR = '#939598'
CCM_COLOR = '#556B2f'
METABOLITE_COLOR = sns.color_palette('Set2')[3]
ACTIVATOR_COLOR = sns.color_palette('Set2')[0] # green
SUBSTRATE_COLOR = sns.color_palette(settings.HEATMAP_COLORMAP)[-1]
INHIBITOR_COLOR = sns.color_palette(settings.HEATMAP_COLORMAP)[0]
BOTH_COLOR = sns.color_palette('Set2')[5]
# Michaelis-Menten
Vmax = 1 # umol/min
Km = 1 # mM
s_range = np.logspace(-3, 3, 100) # 10 uM - 100 mM
v_s = lambda s: Vmax * s / (Km + s)
eps_s_v = lambda s: 1 - s / (Km + s)
v_x = lambda s: Vmax * (1 - s / (Km + s))
eps_x_v = lambda s: -s / (Km + s)
abs_eps_x_v = lambda s: s / (Km + s)
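# Quick illustrative check of the expressions above (not used by the analysis):
# at s = Km the Michaelis-Menten rate is half-maximal and the substrate elasticity
# is one half, i.e. v_s(Km) = Vmax * Km / (Km + Km) = 0.5 and
# eps_s_v(Km) = 1 - Km / (Km + Km) = 0.5; abs_eps_x_v(Km) = 0.5 as well.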
class FigurePlotter(object):
def __init__(self, rebuild_cache=False):
self.stat_df = pd.DataFrame(index=STAT_TABLE_INDEX,
columns=['km', 'KM_Value',
'regulation', 'KI_Value'])
self.kegg = KEGG()
self.bigg = BiGG()
self.native_mets = self.bigg.get_mets_in_cytosol()
self.native_ECs = self.bigg.get_native_EC_numbers()
self.get_data()
_fname = os.path.join(settings.RESULT_DIR, 'ecoli_interactions.csv')
self.regulation.to_csv(_fname)
def get_kinetic_param(self, name, value_col, organism=ORGANISM):
k = settings.read_cache(name)
self.stat_df[name].iat[0] = k.shape[0] # all entries
self.stat_df[value_col].iat[0] = (k[value_col] > 0).sum()
k = k[k['Organism'].str.lower() == organism.lower()]
self.stat_df[name].iat[1] = k.shape[0] # filtered by organsim
self.stat_df[value_col].iat[1] = (k[value_col] > 0).sum()
k = k[(pd.isnull(k['Commentary'])) |
((k['Commentary'].str.find('mutant') == -1) &
(k['Commentary'].str.find('mutation') == -1) &
(k['Commentary'].str.find('variant') == -1) &
(k['Commentary'].str.find('genetically engineered') == -1))]
self.stat_df[name].iat[2] = k.shape[0] # filtering mutants
self.stat_df[value_col].iat[2] = (k[value_col] > 0).sum()
# remove values with unmatched ligand
k = k[pd.notnull(k['bigg.metabolite'])]
k['bigg.metabolite'] = k['bigg.metabolite'].str.lower()
return k
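    # Note: get_kinetic_param fills only the first three STAT_TABLE_INDEX rows
    # ('all entries', 'keeping only E. coli data', 'filtering out data about
    # mutated enzymes'); the remaining rows are presumably filled further down
    # the pipeline.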
def filter_non_native_interactions(self, k):
k = k[k['bigg.metabolite'].isin(self.native_mets)]
k = k[k['EC_number'].isin(self.native_ECs)]
return k
@staticmethod
def calc_sat(k, value_col, conc_df, agg_type='gmean'):
# filter missing Km or Ki values and -999 cases.
k = k[k[value_col] > 0]
# choose the minimum/median/gmean value among all repeats
k = k.groupby(['EC_number', 'bigg.metabolite'])[value_col]
if agg_type == 'minimum':
k = k.min()
elif agg_type == 'gmean':
k = k.apply(gmean)
elif agg_type == 'median':
k = k.median()
k = k.reset_index()
# join data with measured concentrations
k = k.join(conc_df, on='bigg.metabolite', how='inner')
# melt table so each line will be a combination of EC,
# substrate/inhibitor and growth condition
k = pd.melt(k, id_vars=('EC_number', 'bigg.metabolite', value_col),
var_name='growth condition', value_name='concentration')
k['saturation'] = k['concentration'] / (k['concentration'] +
k[value_col])
k['met:EC'] = k['bigg.metabolite'].str.cat(k['EC_number'], sep=':')
return k
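    # For reference: the 'saturation' column computed in calc_sat is s / (s + K),
    # the Michaelis-Menten saturation level at concentration s for a constant
    # (Km or Ki) taken from `value_col`; a concentration equal to the constant
    # gives a saturation of 0.5.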
@staticmethod
def calc_agg_sat(k, agg_type='median', value_col='elasticity'):
"""
        Aggregate `value_col` (e.g. saturation or elasticity) over all matching
        EC-metabolite pairs, per metabolite and growth condition.
        Input:
            k - a DataFrame with at least the columns 'bigg.metabolite',
                'growth condition' and `value_col` (as produced by calc_sat)
            agg_type - 'median' or 'gmean', the aggregation applied per group
        Returns a metabolite-by-condition pivot table of the aggregated values.
"""
k_grp = k.groupby(('bigg.metabolite', 'growth condition'))
if agg_type == 'median':
fc_med = k_grp.median()
elif agg_type == 'gmean':
fc_med = k_grp.agg(lambda x: gmean(list(x)))
fc_med = fc_med[[value_col]].reset_index()
fc_med = fc_med.pivot('bigg.metabolite', 'growth condition',
value_col)
return fc_med.sort_index(axis=0)
@staticmethod
def get_subsystem_data():
"""
Returns:
- 1-to-many mapping BiGG Reaction IDs to cellular subsystems
- many-to-many mapping of BiGG metabolites IDs to subsystems
"""
with open(settings.ECOLI_JSON_FNAME) as fp:
ecoli_model = json.load(fp, encoding='UTF-8')
subsystem_data = []
stoich_data = []
for r in ecoli_model['reactions']:
rid = r['id'].lower()
if 'subsystem' in r:
subsystem_data.append((rid, r['subsystem']))
if 'metabolites' in r:
for met, coeff in r['metabolites'].items():
stoich_data.append((rid, met, coeff))
reaction_subsystem_df = pd.DataFrame(
subsystem_data,
columns=('bigg.reaction', 'bigg.subsystem.reaction'))
reaction_subsystem_df.set_index('bigg.reaction', inplace=True)
stoich_df = pd.DataFrame(stoich_data,
columns=('bigg.reaction',
'bigg.metabolite', 'coeff'))
# now associate every metabolite to subsystems by joining the two
# tables
metabolite_subsystem_df = stoich_df.join(
reaction_subsystem_df, on='bigg.reaction')
metabolite_subsystem_df.rename(
columns={'bigg.subsystem.reaction': 'bigg.subsystem.metabolite'},
inplace=True)
metabolite_subsystem_df.drop('bigg.reaction', axis=1, inplace=True)
metabolite_subsystem_df.drop('coeff', axis=1, inplace=True)
metabolite_subsystem_df.drop_duplicates(inplace=True)
# keep only cytoplasmic metabolites, and remove the suffix _c
metabolite_subsystem_df = metabolite_subsystem_df[
metabolite_subsystem_df['bigg.metabolite'].str[-2:] == '_c']
# then remove the _c suffix and convert to lowercase
metabolite_subsystem_df.loc[:, 'bigg.metabolite'] = \
metabolite_subsystem_df['bigg.metabolite'].map(
lambda s: s[0:-2].lower())
return reaction_subsystem_df, metabolite_subsystem_df
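    # Illustrative shape of the two returned frames:
    #   reaction_subsystem_df - indexed by 'bigg.reaction' with a single
    #       'bigg.subsystem.reaction' column
    #   metabolite_subsystem_df - rows of ('bigg.metabolite',
    #       'bigg.subsystem.metabolite') pairs for cytosolic metabolites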
def get_data(self):
        _df = pd.DataFrame.from_csv(settings.ECOLI_METAB_FNAME)
import nose
import unittest
from numpy import nan
from pandas.core.daterange import DateRange
from pandas.core.index import Index, MultiIndex
from pandas.core.common import rands, groupby
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.core.panel import WidePanel
from collections import defaultdict
import pandas.core.datetools as dt
import numpy as np
import pandas.util.testing as tm
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8).upper() for x in xrange(250)])
self.groupId = Series([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in self.groupId.iteritems())
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = DataFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
class GroupByTestCase(unittest.TestCase):
setUp = commonSetUp
def test_python_grouper(self):
groupFunc = self.groupDict.get
groups = groupby(self.stringIndex, groupFunc)
setDict = dict((k, set(v)) for k, v in groups.iteritems())
for idx in self.stringIndex:
key = groupFunc(idx)
groupSet = setDict[key]
assert(idx in groupSet)
class TestGroupBy(unittest.TestCase):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B' : ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C' : np.random.randn(8),
'D' : np.random.randn(8)})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
def test_basic(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
# Cython only returning floating point for now...
assert_series_equal(grouped.agg(np.sum).astype(float),
grouped.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one' : np.mean,
'two' : np.std})
group_constants = {
0 : 10,
1 : 20,
2 : 30
}
agged = grouped.agg(lambda x: group_constants[x.groupName] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
def test_series_agg_corner(self):
# nothing to group, all NA
result = self.ts.groupby(self.ts * np.nan).sum()
assert_series_equal(result, Series([]))
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg({'A' : 'var', 'B' : 'std', 'C' : 'mean'})
expected = DataFrame({'A' : grouped['A'].var(),
'B' : grouped['B'].std(),
'C' : grouped['C'].mean()})
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_basic_regression(self):
# regression
T = [1.0*x for x in range(1,10) *10][:1095]
result = Series(T, range(0, len(T)))
groupings = np.random.random((1100,))
groupings = Series(groupings, range(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) / 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
transformed = grouped.transform(np.mean)
for name, group in grouped:
mean = group.mean()
for idx in group.index:
self.assertEqual(transformed[idx], mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
filled = df.groupby(lambda x: x.month).fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_with_na(self):
index = Index(np.arange(10))
values = Series(np.ones(10), index)
labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',
'bar', nan, 'foo'], index=index)
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected)
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
        assert_frame_equal(result, expected)
import itertools
import numpy as np
import pandas as pd
def F_score(v, y_label):
x_0 = 0
x_1 = 0
v_pos = v[y_label > 0]
v_neg = v[y_label <= 0]
v_ave = np.mean(v)
v_pos_ave = np.mean(v_pos)
v_neg_ave = np.mean(v_neg)
len_pos = len(v_pos)
len_neg = len(v_neg)
for i in range(len_pos):
x_0 += (v_pos[i] - v_pos_ave) ** 2
for j in range(len_neg):
        x_1 += (v_neg[j] - v_neg_ave) ** 2
f_score = ((v_pos_ave - v_ave) ** 2 + (v_neg_ave - v_ave) ** 2) / (
(1 / (len_pos - 1)) * x_0 + (1 / (len_neg - 1)) * x_1)
return f_score
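# Minimal usage sketch with made-up values (illustrative only):
#   v = np.array([1.0, 2.0, 3.0, 4.0])
#   y = np.array([1, 1, -1, -1])
#   F_score(v, y)   # larger scores mean the feature separates the two classes better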
def make_kmer_list(k, alphabet):
try:
return ["".join(e) for e in itertools.product(alphabet, repeat=k)]
except TypeError:
print("TypeError: k must be an inter and larger than 0, alphabet must be a string.")
raise TypeError
except ValueError:
print("TypeError: k must be an inter and larger than 0")
raise ValueError
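# Example: make_kmer_list(2, 'ACGT') yields the 16 dinucleotides
# ['AA', 'AC', 'AG', 'AT', 'CA', ..., 'TT']; in general the list has 4 ** k entries.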
def kmer(data_seq, k):
# calculate the k-mer feature of a seq
RNA_code = 'ACGT'
    code_values = make_kmer_list(k, RNA_code)
count = np.zeros((len(data_seq), len(code_values)))
for i, line_value in enumerate(data_seq.values): # for every samples
for j, code_value in enumerate(line_value[0]): # for every position
if j <= len(line_value[0]) - k + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + k]:
count[i][p] += 1
count /= len(code_values) - k + 1
return count
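# Note: `count` has one row per sequence and one column per possible k-mer,
# i.e. 4 ** k columns (64 when k = 3), holding scaled occurrence counts.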
def MvPS3merNP(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
RNA_code = 'ACGT'
all_final_seq_value_tra = []
all_final_seq_value_tes = []
for train_sample in train_samples:
# calculate Z matrix
positive_seq = all_positive_seq[train_sample]
negative_seq = all_negative_seq[train_sample]
len_seq = len(positive_seq[0])
positive_df = pd.DataFrame(positive_seq)
positive_x_train = positive_df.iloc[:, :]
negative_df = pd.DataFrame(negative_seq)
negative_x_train = negative_df.iloc[:, :]
code_values = make_kmer_list(interval, RNA_code)
code_len = len(code_values)
positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
for i, line_value in enumerate(positive_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
positive_seq_value[p][j] += 1
positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
for i, line_value in enumerate(negative_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
negative_seq_value[p][j] += 1
negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
tes_final_value = []
tra_final_value = []
# training features
for train_sample_x in train_samples:
tra_positive_seq = all_positive_seq[train_sample_x]
tra_negative_seq = all_negative_seq[train_sample_x]
tra_positive_df = pd.DataFrame(tra_positive_seq)
tra_negative_df = pd.DataFrame(tra_negative_seq)
tra_positive_train = tra_positive_df.iloc[:, :]
tra_negative_train = tra_negative_df.iloc[:, :]
tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tra_positive_negative_train))]
for i, line_value in enumerate(tra_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tra_final_seq_value[i][j] = positive_seq_value[p, j] - negative_seq_value[p, j]
tra_final_value.append(tra_final_seq_value)
tes_positive_seq = all_positive_seq[test_sample]
tes_negative_seq = all_negative_seq[test_sample]
tes_positive_df = pd.DataFrame(tes_positive_seq)
tes_negative_df = pd.DataFrame(tes_negative_seq)
tes_positive_train = tes_positive_df.iloc[:, :]
tes_negative_train = tes_negative_df.iloc[:, :]
tes_positive_negative_train = pd.concat([tes_positive_train, tes_negative_train], axis=0)
tes_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tes_positive_negative_train))]
for i, line_value in enumerate(tes_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tes_final_seq_value[i][j] = positive_seq_value[p, j] - negative_seq_value[p, j]
tes_final_value.append(tes_final_seq_value)
all_final_seq_value_tra.append(np.concatenate(tra_final_value))
all_final_seq_value_tes.append(np.concatenate(tes_final_value))
X_train = np.array(all_final_seq_value_tra)
X_test = np.array(all_final_seq_value_tes)
return X_train, X_test
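# Reading of the encoding above (informal summary, not an official description):
# for each training "view", every sequence position j is replaced by
# Z[p, j] = P_pos[p, j] - P_neg[p, j], the difference between the position-specific
# frequencies of the k-mer p found at j in the positive and negative training sets.
# The _KL and _JS variants below reuse the same machinery but score positions with
# KL- and JS-divergence-style terms instead of a plain frequency difference.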
def MvPS3merNP_KL(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
RNA_code = 'ACGT'
all_final_seq_value_tra = []
all_final_seq_value_tes = []
for train_sample in train_samples:
# calculate Z matrix
positive_seq = all_positive_seq[train_sample]
negative_seq = all_negative_seq[train_sample]
len_seq = len(positive_seq[0])
positive_df = pd.DataFrame(positive_seq)
positive_x_train = positive_df.iloc[:, :]
negative_df = pd.DataFrame(negative_seq)
negative_x_train = negative_df.iloc[:, :]
code_values = make_kmer_list(interval, RNA_code)
code_len = len(code_values)
positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
for i, line_value in enumerate(positive_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
positive_seq_value[p][j] += 1
positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
for i, line_value in enumerate(negative_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
negative_seq_value[p][j] += 1
negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
positive_seq_value[positive_seq_value <= 0] = 1e-09
positive_seq_value_log = np.log(positive_seq_value)
# positive_seq_value_log[np.isinf(positive_seq_value_log)] = -10
negative_seq_value[negative_seq_value <= 0] = 1e-09
negative_seq_value_log = np.log(negative_seq_value)
# negative_seq_value_log[np.isinf(negative_seq_value_log)] = -10
Z = np.multiply(positive_seq_value, (positive_seq_value_log - negative_seq_value_log))
tes_final_value = []
tra_final_value = []
# training features
for train_sample_x in train_samples:
tra_positive_seq = all_positive_seq[train_sample_x]
tra_negative_seq = all_negative_seq[train_sample_x]
tra_positive_df = pd.DataFrame(tra_positive_seq)
tra_negative_df = pd.DataFrame(tra_negative_seq)
tra_positive_train = tra_positive_df.iloc[:, :]
tra_negative_train = tra_negative_df.iloc[:, :]
tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tra_positive_negative_train))]
for i, line_value in enumerate(tra_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tra_final_seq_value[i][j] = Z[p, j]
tra_final_value.append(tra_final_seq_value)
tes_positive_seq = all_positive_seq[test_sample]
tes_negative_seq = all_negative_seq[test_sample]
tes_positive_df = pd.DataFrame(tes_positive_seq)
tes_negative_df = pd.DataFrame(tes_negative_seq)
tes_positive_train = tes_positive_df.iloc[:, :]
tes_negative_train = tes_negative_df.iloc[:, :]
tes_positive_negative_train = pd.concat([tes_positive_train, tes_negative_train], axis=0)
tes_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tes_positive_negative_train))]
for i, line_value in enumerate(tes_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tes_final_seq_value[i][j] = Z[p, j]
tes_final_value.append(tes_final_seq_value)
all_final_seq_value_tra.append(np.concatenate(tra_final_value))
all_final_seq_value_tes.append(np.concatenate(tes_final_value))
X_train = np.array(all_final_seq_value_tra)
X_test = np.array(all_final_seq_value_tes)
return X_train, X_test
def MvPS3merNP_JS(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
RNA_code = 'ACGT'
all_final_seq_value_tra = []
all_final_seq_value_tes = []
for train_sample in train_samples:
# calculate Z matrix
positive_seq = all_positive_seq[train_sample]
negative_seq = all_negative_seq[train_sample]
len_seq = len(positive_seq[0])
positive_df = pd.DataFrame(positive_seq)
positive_x_train = positive_df.iloc[:, :]
negative_df = pd.DataFrame(negative_seq)
negative_x_train = negative_df.iloc[:, :]
code_values = make_kmer_list(interval, RNA_code)
code_len = len(code_values)
positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
for i, line_value in enumerate(positive_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
positive_seq_value[p][j] += 1
positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
for i, line_value in enumerate(negative_x_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
negative_seq_value[p][j] += 1
negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
positive_seq_value[positive_seq_value <= 0] = 1e-09
positive_seq_value_log = np.log(positive_seq_value)
# positive_seq_value_log[np.isinf(positive_seq_value_log)] = -10
negative_seq_value[negative_seq_value <= 0] = 1e-09
negative_seq_value_log = np.log(negative_seq_value)
# negative_seq_value_log[np.isinf(negative_seq_value_log)] = -10
seq_value_log = np.log((positive_seq_value + negative_seq_value) / 2)
Z = 1 / 2 * np.multiply(positive_seq_value, (positive_seq_value_log - seq_value_log)) + 1 / 2 * np.multiply(
negative_seq_value, (negative_seq_value_log - seq_value_log))
tes_final_value = []
tra_final_value = []
# training features
for train_sample_x in train_samples:
tra_positive_seq = all_positive_seq[train_sample_x]
tra_negative_seq = all_negative_seq[train_sample_x]
tra_positive_df = pd.DataFrame(tra_positive_seq)
tra_negative_df = pd.DataFrame(tra_negative_seq)
tra_positive_train = tra_positive_df.iloc[:, :]
tra_negative_train = tra_negative_df.iloc[:, :]
tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
range(len(tra_positive_negative_train))]
for i, line_value in enumerate(tra_positive_negative_train.values):
for j, code_value in enumerate(line_value[0]):
if j <= len(line_value[0]) - interval + 1:
for p, c_value in enumerate(code_values):
if c_value == line_value[0][j:j + interval]:
tra_final_seq_value[i][j] = Z[p, j]
tra_final_value.append(tra_final_seq_value)
tes_positive_seq = all_positive_seq[test_sample]
tes_negative_seq = all_negative_seq[test_sample]
tes_positive_df = | pd.DataFrame(tes_positive_seq) | pandas.DataFrame |
"""Functions to generate metafeatures using heuristics."""
import re
import numpy as np
import pandas as pd
from pandas.api import types
def _raise_if_not_pd_series(obj):
if not isinstance(obj, pd.Series):
raise TypeError(
f"Expecting `pd.Series type as input, instead of {type(obj)} type."
)
def _safe_div(num, denom):
EPSILON = 1e-8
return num / (denom + EPSILON)
def convert_to_numeric(series: pd.Series) -> pd.Series:
"""Retain and convert any numeric data points."""
return pd.to_numeric(series.copy(), errors="coerce").dropna()
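# Example: convert_to_numeric(pd.Series(['1', 'x', '2.5'])) drops the non-castable
# value and returns a float series equivalent to pd.Series([1.0, 2.5], index=[0, 2]).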
def is_number_as_string(
series: pd.Series, shrinkage_threshold: float = 0.7
) -> bool:
"""
    Check whether string values look like they encode numbers.
    Remove non-numerals from each string and compute the fraction of its length
    that remains (i.e. the share of characters that are digits).
    shrinkage_threshold:
        Numeric-like values that are extractable downstream should retain at
        least this fraction of their length after removing non-numerals.
Returns:
True if at least half of the values' relative post-shrinkage length is
at least `shrinkage_threshold`, and there is at least one value remaining
after numerical conversion.
"""
series = series.copy().astype(str)
nums_removed = series.apply(lambda x: re.sub(r"\D", "", x))
rel_post_shrinkage_len = _safe_div(
nums_removed.apply(len), series.apply(len)
)
most_values_contain_numbers = (
_safe_div(
(rel_post_shrinkage_len > shrinkage_threshold).sum(),
len(rel_post_shrinkage_len),
)
>= 0.5
)
at_least_one_value_remaining = bool(len(convert_to_numeric(series)))
return most_values_contain_numbers and at_least_one_value_remaining
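# Illustrative example (hand-checked against the logic above): for
# pd.Series(['100', '250m', '80m']) most values keep over 70% of their length after
# dropping non-digits and at least one value ('100') is directly castable, so the
# function returns True.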
def castable_as_numeric(series: pd.Series, threshold: float = 0.95) -> bool:
"""
Check if series values can be casted as numeric dtypes.
Returns:
True if at least `threshold` values can be casted as numerics.
"""
# Columns which are already of numeric dtype are considered not castable
if series.dtype in ["float", "int"]:
return False
return _safe_div(len(convert_to_numeric(series)), len(series)) >= threshold
def numeric_extractable(series: pd.Series, threshold: float = 0.95) -> bool:
"""
Check if numbers can be extracted from series values.
Returns:
True if at least `threshold` values contain numerics.
"""
# Columns which are already of numeric dtype are considered not extractable
if series.dtype in ["float", "int"]:
return False
series = series.copy().dropna().astype(str)
n_contains_digits = series.apply(
lambda x: any(char.isdigit() for char in x)
).sum()
return _safe_div(n_contains_digits, len(series)) >= threshold
def normalized_distinct_rate(df: pd.DataFrame) -> pd.Series:
"""
Calculate the % of distinct values relative to the number of non-null entries.
Arguments:
        df {pd.DataFrame} -- Dataframe to analyze.
Returns:
pd.Series -- Normalized distinct rate.
"""
return _safe_div(df["num_distincts"], df["total_val"] - df["num_nans"])
def nan_rate(df: pd.DataFrame) -> pd.Series:
"""
Calculate the % of NaNs relative to the total number of data points.
Arguments:
df {pd.DataFrame} -- Dataframe to analyze.
Returns:
pd.Series -- NaN rate.
"""
return _safe_div(df["num_nans"], df["total_val"])
def avg_val_len(raw: pd.DataFrame) -> pd.Series:
"""
    Get the average length of values in the feature column.
Returns -1 if feature column is completely empty.
Arguments:
raw {pd.DataFrame} -- Raw dataframe to analyze.
Returns:
pd.Series -- Average length of elements in feature column
"""
result = []
for col in raw:
series = raw[col].dropna()
if not len(series):
result.append(-1)
continue
result.append(_safe_div(sum(len(str(x)) for x in series), len(series)))
return pd.Series(result, index=raw.columns)
def stddev_val_len(raw: pd.DataFrame) -> pd.Series:
"""
    Get the standard deviation of value lengths in the feature column.
Returns -1 if feature column is completely empty.
Arguments:
raw {pd.DataFrame} -- Raw dataframe to analyze.
Returns:
pd.Series -- Standard deviation length of elements in feature column
"""
result = []
for col in raw:
series = raw[col].dropna()
if not len(series):
result.append(-1)
continue
result.append(np.std([len(str(x)) for x in series]))
return pd.Series(result, index=raw.columns)
def maybe_zipcode(raw: pd.DataFrame, threshold: float = 0.95) -> pd.Series:
"""
    Infer if each column of the DataFrame might contain zipcodes.
The three decision criteria are:
1. 'zip' appears in the name
2. At least `threshold` values look like US zipcodes (5 digits).
3. At least `threshold` values look like Canadian zipcodes (*#* #*#).
Arguments:
        raw {pd.DataFrame} -- Raw DataFrame to analyze.
Keyword Arguments:
threshold {float} -- Minimum value for criterion to be considered met. (default: {0.95})
Returns:
pd.Series[int] -- Scores for each series in dataframe.
A point is given for each criterion met.
"""
return raw.apply(_maybe_zipcode)
def _maybe_zipcode(raw_s: pd.Series, threshold: float = 0.95) -> int:
"""
Infer if series might be a zipcode.
The three decision criteria are:
1. 'zip' appears in the name
2. At least `threshold` values look like US zipcodes (5 digits).
3. At least `threshold` values look like Canadian zipcodes (*#* #*#).
Arguments:
raw_s {pd.Series} -- Raw pd.Series to analyze.
Keyword Arguments:
threshold {float} -- Minimum value for criterion to be considered met. (default: {0.95})
Returns:
int -- Score. A point is given for each criterion met.
"""
_raise_if_not_pd_series(raw_s)
points = 0
# Criterion 1
if "zip" in str(raw_s.name):
points += 1
# Criterion 2
at_least_5_digits = raw_s.apply(
lambda x: len(str(x)) == 5 and str(x).isnumeric()
)
if _safe_div(at_least_5_digits.sum(), len(raw_s)) >= threshold:
points += 1
# Criterion 3
is_cad_zip = raw_s.apply(
lambda x: bool(re.search(r"\w\d\w\s?\d\w\d", str(x)))
)
if _safe_div(is_cad_zip.sum(), len(raw_s)) >= threshold:
points += 1
return points
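# Illustrative scoring: a series named 'zip' holding values such as '90210' and
# '10001' earns one point for the name and one for the 5-digit pattern (score 2);
# Canadian-style values such as 'K1A 0B1' would trigger the third criterion instead.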
def maybe_real_as_categorical(
df: pd.DataFrame, max_n_distinct: int = 20
) -> pd.Series:
"""
Evaluate if feature column might be categorical.
Check that values are numeric and at most `max_n_distinct` distinct values.
Arguments:
df {pd.DataFrame} -- Metafeatures.
Keyword Arguments:
max_n_distinct {int} -- Maximum number of default categories. (default: {20})
Returns:
pd.Series -- A boolean series on whether a model might be categorical or not.
"""
# Pick out sample columns, while ignoring other metafeatures including `samples_set`
samples = df[
[col for col in df.columns if "sample" in col and "samples" not in col]
]
is_numeric = []
for row in samples.itertuples(False):
coerced_numeric = pd.Series(
| pd.to_numeric(row, errors="coerce") | pandas.to_numeric |
import json
import os
import albumentations as alb
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from albumentations.pytorch import ToTensorV2
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from torch.utils.data import DataLoader, Dataset
from transformers import BeitFeatureExtractor
beit_feature_extractor = BeitFeatureExtractor.from_pretrained(
"microsoft/beit-base-patch16-224-pt22k-ft22k"
)
def binarize_df(label_path):
df = pd.read_csv(label_path)
df = df.dropna(axis=1, how="all") # save memory and process usage
df = df.fillna("None") # to avoid error
mlb = MultiLabelBinarizer()
result = mlb.fit_transform(
df.drop(columns=["filenames"]).values
) # drop not tagging cols
    bin_df = pd.DataFrame(result, columns=mlb.classes_)
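    # For reference (illustrative): MultiLabelBinarizer turns each row of tag values
    # into a fixed-length 0/1 indicator vector, so `bin_df` holds one column per
    # distinct tag (including the 'None' filler), aligned with mlb.classes_.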
#!/usr/bin/env python3
# -*- coding: utf-8 -*
import sys
sys.path.append('../') # or just install the module
sys.path.append('../../fuzzy-tools') # or just install the module
sys.path.append('../../astro-lightcurves-handler') # or just install the module
sys.path.append('../../astro-lightcurves-fats') # or just install the module
###################################################################################################################################################
import argparse
from fuzzytools.prints import print_big_bar
parser = argparse.ArgumentParser(prefix_chars='--')
parser.add_argument('--method', type=str)
parser.add_argument('--kf', type=str)
parser.add_argument('--mid', type=str, default='0')
parser.add_argument('--classifier_mids', type=int, default=2)
main_args = parser.parse_args()
print_big_bar()
###################################################################################################################################################
import numpy as np
from fuzzytools.files import load_pickle, save_pickle, get_dict_from_filedir
from lcfeatures.files import load_features
from fuzzytools.progress_bars import ProgressBar
from lcfeatures.classifiers import train_classifier, evaluate_classifier
import pandas as pd
filedir = f'../../surveys-save/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method={main_args.method}.splcds'
filedict = get_dict_from_filedir(filedir)
rootdir = filedict['_rootdir']
cfilename = filedict['_cfilename']
lcdataset = load_pickle(filedir)
lcset_info = lcdataset['raw'].get_info()
lcdataset.only_keep_kf(main_args.kf) # saves ram
# print(lcdataset)
train_modes = ['r', 's', 'r+s'] if main_args.method=='spm-mcmc-estw' else ['s', 'r+s']
for train_mode in train_modes:
for classifier_mid in range(0, main_args.classifier_mids):
print(f'training brf for train_mode={train_mode}; kf={main_args.kf}; method={main_args.method}; mid={main_args.mid}c{classifier_mid}')
train_df_x_r, train_df_y_r = load_features(f'../save/fats/{cfilename}/{main_args.kf}@train.df')
if train_mode=='r':
train_df_x = pd.concat([train_df_x_r], axis='rows')
            train_df_y = pd.concat([train_df_y_r], axis='rows')
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance( c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must setup tree because if not negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', update=True)
assert s.root.stale == True
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
# Check that rebalance with update=False
# does not mark the node as stale
s.rebalance(0.6, 'c1', update=False)
assert s.root.stale == False
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.loc[dts[1], 'c1'] = 105
data.loc[dts[1], 'c2'] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert m.price == 100
assert s1.value == 0
assert s2.value == 0
# now allocate directly to child
s1.allocate(500)
assert m.capital == 500
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.weight == 0
# allocate to child2 via parent method
m.allocate(500, 's2')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000
assert s2.price == 100
# now allocate and incur commission fee
s1.allocate(500, 'c1')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000.0
assert s2.price == 100
def test_strategy_tree_proper_universes():
def do_nothing(x):
return True
child1 = Strategy('c1', [do_nothing], ['b', 'c'])
parent = Strategy('m', [do_nothing], [child1, 'a'])
child1 = parent['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(
{'a': pd.Series(data=1, index=dts, name='a'),
'b': pd.Series(data=2, index=dts, name='b'),
'c': pd.Series(data=3, index=dts, name='c')})
parent.setup(data, test_data1 = 'test1')
assert len(parent.children) == 1
assert 'c1' in parent.children
assert len(parent._universe.columns) == 2
assert 'c1' in parent._universe.columns
assert 'a' in parent._universe.columns
assert len(child1._universe.columns) == 2
assert 'b' in child1._universe.columns
assert 'c' in child1._universe.columns
assert parent._has_strat_children
assert len(parent._strat_children) == 1
assert parent.get_data( 'test_data1' ) == 'test1'
# New child strategy with parent (and using dictionary notation)
child2 = Strategy('c2', [do_nothing], {'a' : SecurityBase(''), 'b' : ''}, parent=parent)
# Setup the child from the parent, but pass in some additional data
child2.setup_from_parent(test_data2 = 'test2')
assert 'a' in child2._universe.columns
assert 'b' in child2._universe.columns
assert 'c2' in parent._universe.columns
# Make sure child has data from the parent and the additional data
assert child2.get_data('test_data1') == 'test1'
assert child2.get_data('test_data2') == 'test2'
assert len(parent._strat_children) == 2
def test_strategy_tree_paper():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['a'], data=100.)
data['a'].loc[dts[1]] = 101
data['a'].loc[dts[2]] = 102
s = Strategy('s',
[bt.algos.SelectWhere(data > 100),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
m = Strategy('m', [], [s])
s = m['s']
m.setup(data)
m.update(dts[0])
m.run()
assert m.price == 100
assert s.price == 100
assert s._paper_trade
assert s._paper.price == 100
s.update(dts[1])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert s.price == 100
s.update(dts[2])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert np.allclose(s.price, 100. * (102 / 101.))
def test_dynamic_strategy():
def do_nothing(x):
return True
# Start with an empty parent
parent = Strategy('p', [do_nothing], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
parent.setup( data )
# NOTE: Price of the sub-strategy won't be correct in this example because
# we are not using the algo stack to impact weights, and so the paper
# trading strategy does not see the same actions as we are doing.
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
trade = Strategy('c1_vs_c2', [], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Go long 'c1' and short 'c2'
trade.rebalance( 1., 'c1')
trade.rebalance( -1., 'c2')
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
# On this step, we close the trade, and allocate capital back to the parent
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
def test_dynamic_strategy2():
# Start with an empty parent
parent = Strategy('p', [], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
data['c1'][dts[3]] = 101.
data['c2'][dts[3]] = 99.
parent.setup( data )
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
def trade_c1_vs_c2( strategy ):
if strategy.now == dts[1]:
strategy.rebalance( 1., 'c1')
strategy.rebalance( -1., 'c2')
trade = Strategy('c1_vs_c2', [trade_c1_vs_c2], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Run the strategy for the timestep
parent.run()
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert np.isnan( parent.universe[ trade.name ][ dts[0] ] )
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
trade = parent[ trade.name ]
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
aae( trade.price, 110.)
# Next we close the trade by flattening positions
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
aae( trade.price, 110.)
# Finally we allocate capital back to the parent to be re-deployed
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
aae( trade.price, 110.) # Price stays the same even after capital de-allocated
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
assert parent.value == 1e6 + 10 * 1e3
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
# Paper trading price, as asset prices have moved, paper trading price
# keeps updating. Note that if the flattening of the position was part
# of the definition of trade_c1_vs_c2, then the paper trading price
# would be fixed after flattening, as it would apply to both real and paper.
aae( trade.price, 102.)
aae( parent.universe[ trade.name ][ dts[i] ], 102. )
def test_outlays():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
# calling outlays should automatically update the strategy, since it is stale
assert c1.outlays[dts[0]] == (4 * 105)
assert c2.outlays[dts[0]] == (5 * 95)
assert c1.data['outlay'][dts[0]] == (4 * 105)
assert c2.data['outlay'][dts[0]] == (5 * 95)
i = 1
s.update(dts[i], data.loc[dts[i]])
c1.allocate(-400)
c2.allocate(100)
# out update
assert c1.outlays[dts[1]] == (-4 * 100)
assert c2.outlays[dts[1]] == 100
assert c1.data['outlay'][dts[1]] == (-4 * 100)
assert c2.data['outlay'][dts[1]] == 100
def test_child_weight_above_1():
# check for child weights not exceeding 1
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(np.random.randn(3, 2) + 100,
index=dts, columns=['c1', 'c2'])
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1e6)
s.allocate(1e6, 'c1')
c1 = s['c1']
assert c1.weight <= 1
def test_fixed_commissions():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
# fixed $1 commission per transaction
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
# out update
s.update(dts[i])
assert c1.value == 400
assert c2.value == 400
assert s.capital == 198
# de-alloc 100 from c1. This should force c1 to sell 2 units to raise at
# least 100 (because of commissions)
c1.allocate(-100)
s.update(dts[i])
assert c1.value == 200
assert s.capital == 198 + 199
# allocate 100 to c2. This should leave things unchanged, since c2 cannot
# buy one unit since the commission will cause total outlay to exceed
# allocation
c2.allocate(100)
s.update(dts[i])
assert c2.value == 400
assert s.capital == 198 + 199
# ok try again w/ 101 allocation. This time, it should work
c2.allocate(101)
s.update(dts[i])
assert c2.value == 500
assert s.capital == 198 + 199 - 101
# ok now let's close the whole position. Since we are closing, we expect
# the allocation to go through, even though the outlay > amount
c2.allocate(-500)
s.update(dts[i])
assert c2.value == 0
assert s.capital == 198 + 199 - 101 + 499
# now we are going to go short c2
# we want to 'raise' 100 dollars. Since we need a minimum of 100, but we
# also have commissions, we will actually short 2 units in order to raise
# at least 100
c2.allocate(-100)
s.update(dts[i])
assert c2.value == -200
assert s.capital == 198 + 199 - 101 + 499 + 199
def test_degenerate_shorting():
# can have a situation where you short infinitely if commission/share > share price
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
# $1/share commission
s.set_commissions(lambda q, p: abs(q) * 1)
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
# c1 trades at 0.01
data = pd.DataFrame(index=dts, columns=['c1'], data=0.01)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
try:
c1.allocate(-10)
assert False
except Exception as e:
assert 'full_outlay should always be approaching amount' in str(e)
def test_securitybase_allocate():
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
c1 = s['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100.)
# set the price
data['c1'][dts[0]] = 91.40246706608193
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 100000 to strategy
original_capital = 100000.
s.adjust(original_capital)
# not integer positions
c1.integer_positions = False
# set the full_outlay and amount
full_outlay = 1999.693706988672
amount = 1999.6937069886717
c1.allocate(amount)
# the results that we want to be true
assert np.isclose(full_outlay ,amount,rtol=0.)
# check that the quantity wasn't decreased and the full_outlay == amount
# we can get the full_outlay that was calculated by
# original capital - current capital
assert np.isclose(full_outlay, original_capital - s._capital, rtol=0.)
def test_securitybase_allocate_commisions():
date_span = pd.date_range(start='10/1/2017', end='10/11/2017', freq='B')
numper = len(date_span.values)
comms = 0.01
data = [[10, 15, 20, 25, 30, 35, 40, 45],
[10, 10, 10, 10, 20, 20, 20, 20],
[20, 20, 20, 30, 30, 30, 40, 40],
[20, 10, 20, 10, 20, 10, 20, 10]]
data = [[row[i] for row in data] for i in range(len(data[0]))] # Transpose
price = pd.DataFrame(data=data, index=date_span)
price.columns = ['a', 'b', 'c', 'd']
# price = price[['a', 'b']]
sig1 = pd.DataFrame(price['a'] >= price['b'] + 10, columns=['a'])
sig2 = pd.DataFrame(price['a'] < price['b'] + 10, columns=['b'])
signal = sig1.join(sig2)
signal1 = price.diff(1) > 0
signal2 = price.diff(1) < 0
tw = price.copy()
tw.loc[:,:] = 0 # Initialize: set everything to 0
tw[signal1] = -1.0
tw[signal2] = 1.0
s1 = bt.Strategy('long_short', [bt.algos.WeighTarget(tw),
bt.algos.RunDaily(),
bt.algos.Rebalance()])
#### now we create the Backtest with commissions=(lambda q, p: abs(p * q) * comms)
t = bt.Backtest(s1, price, initial_capital=1000000, commissions=(lambda q, p: abs(p * q) * comms), progress_bar=False)
####and let's run it!
res = bt.run(t)
########################
def test_strategybase_tree_transact():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
##############################
##### DO NOT TOUCH BELOW #####
##############################
# Import packages and start CAS session
import swat, sys
conn = swat.CAS()
table = sys.argv[1]
nodeid = sys.argv[2]
caslib = sys.argv[3]
# Bring data locally
df = conn.CASTable(caslib = caslib, name = table).to_frame()
##############################
##### DO NOT TOUCH ABOVE #####
##############################
# Import packages
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.ensemble import GradientBoostingClassifier
#############
# Data prep #
#############
### Modify pandas dataframe called df ###
# Impute missing values
## Most frequent
df['IMP_REASON']=df['REASON'].fillna('DebtCon')
df['IMP_JOB']=df['JOB'].fillna('Other')
## Mean
mean_imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
mean=pd.DataFrame(mean_imp.fit_transform(df[['CLAGE','MORTDUE','NINQ','DEROG']]), columns=['IMP_CLAGE','IMP_MORTDUE','IMP_NINQ','IMP_DEROG'])
## Median
median_imp = Imputer(missing_values='NaN', strategy='median', axis=0)
median = pd.DataFrame(median_imp.fit_transform(df[['DELINQ','VALUE','CLNO','DEBTINC','YOJ']]), columns=['IMP_DELINQ','IMP_VALUE','IMP_CLNO','IMP_DEBTINC','IMP_YOJ'])
## Bring together
df=pd.concat([df[['_dmIndex_', '_PartInd_', 'BAD', 'LOAN']], df.iloc[:,-2:], mean, median], axis=1)
# One-hot encode character variables
dtypes = df.dtypes
nominals = dtypes[dtypes=='object'].keys().tolist()
df = pd.concat([df, pd.get_dummies(df[nominals])], axis=1)
import pandas as pd
import numpy as np
from multiprocessing import Pool
import tqdm
import sys
import gzip as gz
from tango.prepare import init_sqlite_taxdb
def translate_taxids_to_names(res_df, reportranks, name_dict):
"""
Takes a pandas dataframe with ranks as columns and contigs as rows and taxids as values and translates taxids
to names column by column using a taxid->name dictionary
Parameters
----------
res_df: pandas.DataFrame
Results with taxids
reportranks: list
List of taxonomic ranks to report results for
name_dict: dictionary
Dictionary mapping taxids -> names
Returns
-------
res: pandas.DataFrame
Dataframe with names instead of taxids
"""
res = {}
for rank in reportranks:
res[rank] = [name_dict[taxid] for taxid in res_df.loc[:,rank]]
res = pd.DataFrame(res)
res.index = res_df.index
res = res.loc[:, reportranks]
return res
def get_thresholds(df, top=10):
"""
Here bit-score thresholds are calculated per query and returned in a dictionary.
The pandas DataFrame is first sorted by bitscore (high to low) and grouped by query; for the best hit of each
query the threshold is computed as (100 - top)% of its bitscore and the result is converted to a dictionary.
Parameters
----------
df: pandas.DataFrame
DataFrame of diamond results
top: int
Percentage range of top bitscore
Returns
-------
thresholds: dict
Dictionary with queries as keys and bitscore thresholds as values
"""
thresholds = (df.sort_values("bitscore", ascending=False).groupby(level=0).first().bitscore * (
(100 - top)) / 100).to_dict()
return thresholds
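# Illustrative sketch (not part of the original module): with top=10 and a best
# bitscore of 100 for "query1", get_thresholds stores 100 * (100 - 10) / 100 = 90,
# so only subsequent hits for query1 scoring >= 90 would be kept.
def _example_get_thresholds():
    hits = pd.DataFrame({"bitscore": [100.0, 95.0, 80.0]},
                        index=["query1", "query1", "query1"])
    return get_thresholds(hits, top=10)  # {'query1': 90.0}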
def get_rank_thresholds(ranks, thresholds):
"""
Constructs dictionary of rank-specific thresholds
Parameters
----------
ranks: list
Taxonomic ranks to assign
thresholds: list
Thresholds for taxonomic ranks
Returns
-------
Dictionary of thresholds
"""
t_len, r_len = len(thresholds), len(ranks)
if t_len != r_len:
sys.exit("ERROR: Number of taxonomic ranks ({}) and number of thresholds ({}) differ\n".format(r_len, t_len))
return dict(zip(ranks, thresholds))
def add_names(x, taxid, ncbi_taxa):
"""
This function translates taxonomy ids to names. It operates per-row in the lineage dataframe.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ncbi_taxa: ete3.ncbi_taxonomy.ncbiquery.NCBITaxa
The ete3 sqlite database connection
Returns
-------
The original DataFrame merged with the taxa names
"""
# Get a names dictionary for all taxids in the row
names = ncbi_taxa.get_taxid_translator(list(x.loc[taxid].values) + [taxid])
n = {}
# Iterate ranks
for rank in list(x.columns):
# Get taxid for the current rank
t = x.loc[taxid, rank]
# If taxid is negative it means that there is no classified taxonomy at this rank
# Instead we get the last known name in the hierarchy. We can then use the negative values to translate into
# the name with the "Unclassified." prefix.
# If the name is 'root' we just use 'Unclassified'
if t < 0:
known_name = names[-t]
if known_name == "root":
name = "Unclassified"
else:
name = known_name
# If taxid is positive we just use the name from the dictionary
else:
name = names[t]
# Add name to a dictionary with keys in the form of {rank}.name
n["{}.name".format(rank)] = name
name_df = pd.DataFrame(n, index=[taxid])
return pd.merge(x, name_df, left_index=True, right_index=True)
def propagate_lower(x, taxid, ranks):
"""
Shift known ranks down through the taxonomic hierarchy.
Parameters
----------
x: pandas.DataFrame
DataFrame of one taxid and its taxonomic ranks
taxid: int
Taxid being evaluated
ranks: list
Ranks used for assigning
Returns
-------
pandas.DataFrame updated with missing ranks
Some proteins in the database may map to a taxonomic rank above the lowest taxonomic rank that we are trying to
assign. For instance, if we use the ranks 'superkingdom phylum genus species' and a protein maps to a taxid at
rank phylum then we want to add the taxonomic information at the genus and species levels. This is done here by
adding the negative taxid of the lowest known rank to the lower ranks.
Example:
In the Uniref90 database the entry 'E1GVX1' maps to taxonomy id 838 (rank: genus, name: Prevotella).
When creating the lineage for taxid 838 we add '-838' to rank species.
"""
rev_ranks = [ranks[x] for x in list(range(len(ranks) - 1, -1, -1))]
missing = {}
known = taxid
for rank in rev_ranks[0:]:
if rank not in x.columns:
missing[rank] = -known
else:
known = x.loc[taxid, rank]
return pd.merge(x, pd.DataFrame(missing, index=[taxid]), left_index=True, right_index=True)
def get_lca(r, assignranks, reportranks):
"""
Assign lowest common ancestor from a set of taxids.
Parameters
----------
r: pandas.DataFrame
Results for a single query, extracted from the main diamond results file
assignranks: list
Taxonomic ranks to assign taxonomy for
reportranks: list
Taxonomic ranks to report taxonomy for
Returns
-------
a tuple of dictionaries with ranks as keys and taxa names/ids as values
This function takes a query-slice of the diamond results after filtering by score (and rank-threshold if tango mode
is 'rank_lca' or 'rank_vote'). It then iterates through each rank in reverse order and checks how many unique taxids
are found at that rank. If there's only one taxid at a rank, the taxids for that rank and all higher ranks are
returned as the lowest common ancestor.
"""
query = r.index.unique()[0]
# Reverse ranks for iterating
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
# Iterate through the assignranks
for rank in rev_ranks:
higher_ranks = reportranks[0:reportranks.index(rank) + 1]
higher_rank_names = ["{}.name".format(x) for x in higher_ranks]
# Count number of taxa at rank
c = r.groupby(rank).count()
# If there's only one taxa then we have found the LCA
if len(c) == 1:
if len(r) == 1:
lca_taxids = r.loc[query, higher_ranks].values
else:
lca_taxids = r.loc[query, higher_ranks].values[0]
return dict(zip(higher_ranks, lca_taxids))
return {}
def parse_with_rank_thresholds(r, assignranks, reportranks, rank_thresholds, mode, vote_threshold):
"""Assigns taxonomy using rank_specific thresholds
The ranks used to assign taxonomy are iterated in reverse (e.g. species, genus, phylum).
At each rank, results are filtered by the corresponding rank threshold;
if no hits remain after filtering, the next rank is evaluated.
Then, if mode=='rank_lca', a lowest common ancestor is calculated from all remaining taxids.
However, if mode=='rank_vote', taxids are counted among the remaining hits and all results matching taxids
that occur at a frequency above vote_threshold are used to determine the lowest common ancestor.
If a taxonomy can be assigned at a rank, it is returned directly. If no taxonomy can be assigned at any of the
ranks, empty results are returned.
Parameters
----------
r: pandas.DataFrame
Dataframe slice for a query
assignranks: list
Taxonomic ranks used to assign taxonomy
reportranks: list
Taxonomic ranks at which taxonomy is reported
rank_thresholds: dict
Dictionary of rank_specific thresholds
mode: str
'rank_lca' or 'rank_vote'
vote_threshold: float
Cutoff used to filter out common taxids
Returns
-------
tuple
Dictionaries with taxonomy names and taxonomy ids at each rank
"""
# Start from lowest rank
rev_ranks = [assignranks[x] for x in list(range(len(assignranks) - 1, -1, -1))]
for rank in rev_ranks:
# Make sure that LCA is not set below current rank
allowed_ranks = assignranks[0:assignranks.index(rank) + 1]
# Get rank threshold
threshold = rank_thresholds[rank]
# Filter results by rank threshold
try:
_r = r.loc[r.pident >= threshold]
except KeyError:
continue
if len(_r) == 0:
continue
lca_taxids = {}
# After filtering, either calculate lca from all filtered taxids
if mode == "rank_lca":
lca_taxids = get_lca(_r, allowed_ranks, reportranks)
# Or at each rank, get most common taxid
elif mode == "rank_vote":
vote = get_rank_vote(_r, rank, vote_threshold)
if len(vote) > 0:
lca_taxids = get_lca(vote, allowed_ranks, reportranks)
if len(lca_taxids.keys()) > 0:
return lca_taxids
return {}
def get_rank_vote(r, rank, vote_threshold=0.5):
"""
Filter results based on fraction of taxa
Parameters
----------
r: pandas.DataFrame
Results for a single query, after filtering with bitscore and rank-specific thresholds
rank: str
Current rank being investigated
vote_threshold: float
Required fraction of hits from a single taxa in order to keep taxa
Returns
-------
Filtered dataframe only containing taxa that meet vote_threshold
Here taxa are counted among all hits remaining for a query after filtering using bitscore and rank-specific
thresholds. Taxa are counted at a certain rank and counts are normalized. Hits belonging to taxa above
vote_threshold are kept while others are filtered out.
"""
# Create dataframe for unique taxids filtered at this rank threshold
taxid_counts = pd.DataFrame(dict.fromkeys(r.staxids.unique(), 1), index=["count"]).T
# Add taxid for rank being investigated
rank_df = r.groupby("staxids").first().reset_index()[[rank, "staxids"]].set_index("staxids")
rank_df = pd.merge(taxid_counts, rank_df, left_index=True, right_index=True)
# Sum counts for current rank
rank_sum = rank_df.groupby(rank).sum()
rank_norm = rank_sum.div(rank_sum.sum())
rank_norm = rank_norm.sort_values("count", ascending=False)
votes = rank_norm.loc[rank_norm["count"] > vote_threshold]
if len(votes) > 0:
return r.loc[r[rank].isin(votes.index)]
return []
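# Illustrative sketch (not part of the original module): three unique subject
# taxids remain for a query; two of them (838 and 815) belong to genus 838, so
# genus 838 holds 2/3 of the normalized vote (> 0.5) and only its hits are kept.
def _example_get_rank_vote():
    hits = pd.DataFrame({"staxids": [838, 815, 839, 838],
                         "genus": [838, 838, 839, 838]},
                        index=["q1", "q1", "q1", "q1"])
    return get_rank_vote(hits, "genus", vote_threshold=0.5)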
def propagate_taxids(res, ranks):
"""
Transfer taxonomy ids to unassigned ranks based on best known taxonomy
Example:
{'species': -1, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
should become
{'species': -171549, 'family': -171549, 'genus': -171549, 'order': 171549, 'phylum': 976, 'class': 200643, 'superkingdom': 2}
Parameters
----------
res: dict
Dictionary of ranks and taxonomy ids
ranks: list
Ranks to assign taxonomy to
Returns
-------
res: dict
Dictionary with updated taxonomy ids
"""
known = -1
for rank in ranks:
# If not -1 (Unclassified) at rank, store assignment as known
if res[rank] != -1:
known = res[rank]
continue
# If -1 at rank (Unclassified), add the taxid with the '-' prefix
if res[rank] == -1:
res[rank] = -abs(known)
return res
def series2df(df):
"""Converts pandas series to pandas dataframe"""
if str(type(df)) == "<class 'pandas.core.series.Series'>":
df = pd.DataFrame(df).T
return df
def read_taxidmap(f, ids):
"""
Reads the protein to taxid map file and stores mappings
Parameters
----------
f: str
Input file with protein_id->taxid map
ids: list
Protein ids to store taxids for
Returns
-------
Dictionary of protein ids to taxid and all unique taxids
"""
taxidmap = dict.fromkeys(ids, -1)
open_function = open
if ".gz" in f:
open_function = gz.open
with open_function(f, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading idmap {}".format(f), ncols=100, unit=" lines"):
items = (line.rstrip()).rsplit()
# If file has only two columns, assume taxid in second
if len(items) == 2:
protid, taxid = items
# Otherwise, assume format is same as NCBI protein mapping
else:
protid, taxid = items[0], items[2]
# Add map to dictionary
# The dictionary is initialized with -1, so adding taxid + 1 yields the actual taxid
# If the protid is not in the dictionary we skip it
try:
taxidmap[protid] += int(taxid) + 1
except KeyError:
continue
except ValueError:
continue
return pd.DataFrame(taxidmap, index=["staxids"]).T, list(set(taxidmap.values()))
def read_df(infile, top=10, e=0.001, input_format="tango", taxidmap=None):
"""
Reads the blast results from file and returns a dictionary with query->results.
Note that the input is assumed to be sorted by bitscore for each query. The first entry for a query is used to set
the score threshold for storing hits for that query. So if a query has a bitscore of 100 and --top 10 is specified
then we only store subsequent hits that have a bitscore of at least (100-0.1*100) = 90.
Tango-formatted output contains one additional column compared to the standard blast format 6:
query1 subject1 93.6 47 3 0 146 6 79 125 8.5e-16 91.3 314295
query1 subject2 100.0 44 0 0 137 6 484 527 2.5e-15 89.7 9347
query2 subject3 53.5 241 84 2 645 7 15 255 1.3e-53 216.9 864142
where the last column is the taxid of the subject.
Otherwise the output may have the typical blast format 6 output.
Parameters
----------
infile: str
Arguments from argument parser
top: int
Keep results within top% of best bitscore
e: float
Maximum allowed e-value to keep a hit.
input_format: str
Blast format. 'tango' if taxid for each subject is present in blast results, otherwise 'blast'
taxidmap: str
File mapping each subject id to a taxid
Returns
-------
tuple
The function returns a tuple with dictionary of query->results and
unique taxonomy ids (if tango format) or unique subject ids
"""
open_function = open
if ".gz" in infile:
open_function = gz.open
r = {}
taxids = []
queries = {}
with open_function(infile, 'rt') as fhin:
for line in tqdm.tqdm(fhin, desc="Reading {}".format(infile), ncols=100, unit=" lines"):
items = line.rstrip().rsplit()
query, subject, pident, evalue, score = items[0], items[1], float(items[2]), \
float(items[10]), float(items[11])
try:
min_score = queries[query]['min_score']
except KeyError:
min_score = score * ((100 - top) / 100)
queries[query] = {'min_score': min_score}
if score < min_score or evalue > e:
continue
if input_format == "tango" and len(items) > 12:
taxid = items[12]
taxids.append(taxid)
# TODO: Is there a way to skip storing the same taxid from a worse hit for the same query
elif input_format == "blast" and len(items) == 12:
taxid = ""
if not taxidmap:
sys.exit(
"ERROR: Standard blast input detected with no protein -> taxid file specified (--taxidmap).")
else:
continue
# Add results for query to dictionary
try:
r[query] += [[subject, pident, evalue, score, int(taxid)]]
except KeyError:
r[query] = [[subject, pident, evalue, score, int(taxid)]]
# If this is blast format then we return all subject ids found
if input_format == "blast":
ids = list(set([r[key][i][0] for key in list(r.keys()) for i in range(0, len(r[key]))]))
return r, ids
# If this is tango format then return all taxids found
return r, list(set(taxids))
def process_lineages(items):
"""
Looks up lineage information from taxids.
The lineage object is a list of taxonomic ids corresponding to the full lineage of a single taxid.
"""
taxid, ranks, taxdir, dbname, lineage = items
# Read the taxonomy db
ncbi_taxa = init_sqlite_taxdb(taxdir, dbname)
# Get ranks for each taxid in the lineage
lineage_ranks = ncbi_taxa.get_rank(lineage)
x = pd.DataFrame(lineage_ranks, index=["rank"]).T
x = x.loc[x["rank"].isin(ranks)].reset_index().T
x.columns = x.loc["rank"]
x.drop("rank", inplace=True)
x.index = [taxid]
# Add taxids for lower ranks in the hierarchy
x = propagate_lower(x, taxid, ranks)
# Add names for taxids
x = add_names(x, taxid, ncbi_taxa)
return x
def make_name_dict(df, ranks):
"""
Creates a dictionary of taxids to taxonomy names, including Unclassified ranks
Parameters
----------
df: pandas.DataFrame
Lineage dataframe
ranks: list
Ranks to store names information for
Returns
-------
name_dict: dict
Name dictionary mapping taxonomy ids to names
"""
name_dict = {}
for rank in ranks:
name_dict.update(dict(zip(df[rank].values, df["{}.name".format(rank)].values)))
name_dict.update(dict(zip(-abs(df[rank]), "Unclassified." + df["{}.name".format(rank)])))
name_dict[-1] = "Unclassified"
return name_dict
def make_lineage_df(taxids, taxdir, dbname, ranks, cpus=1):
"""
Creates a lineage dataframe with full taxonomic information for a list of taxids.
Example:
taxid species phylum genus genus.name phylum.name species.name
859655 305 1224 48736 Ralstonia Proteobacteria Ralstonia solanacearum
387344 1580 1239 1578 Lactobacillus Firmicutes Lactobacillus brevis
358681 1393 1239 55080 Brevibacillus Firmicutes Brevibacillus brevis
Parameters
----------
taxids: list
List of taxonomic ids to obtain information for
taxdir: str
Path to directory holding taxonomic info
dbname: str
Name of ete3 sqlite database within taxdir
ranks: list
Ranks to store information for
cpus: int
Number of cpus to use
Returns
-------
lineage_df: pandas.DataFrame
Data Frame with full taxonomic info
"""
# Read the taxonomy db
ncbi_taxa = init_sqlite_taxdb(taxdir, dbname)
lineages = ncbi_taxa.get_lineage_translator(taxids)
# Store potential missing taxids and warn user
missing_taxids = set([int(x) for x in taxids]).difference(lineages.keys())
# Get possible translations for taxids that have been changed
_, translate_dict = ncbi_taxa._translate_merged(list(set(taxids).difference(lineages.keys())))
rename = {y: x for x, y in translate_dict.items()}
# Update lineages with missing taxids
lineages.update(ncbi_taxa.get_lineage_translator(translate_dict.values()))
items = [[taxid, ranks, taxdir, dbname, lineages[taxid]] for taxid in list(lineages.keys())]
with Pool(processes=cpus) as pool:
res = list(
tqdm.tqdm(pool.imap(process_lineages, items), desc="Making lineages", total=len(items),
unit=" taxids", ncols=100))
lineage_df = pd.concat(res, sort=False)
lineage_df.rename(index=rename, inplace=True)
lineage_df.rename(index=lambda x: int(x), inplace=True)
for rank in ranks:
        lineage_df[rank] = pd.to_numeric(lineage_df[rank])
#!/usr/bin/env python
# coding: utf-8
# ## Predictive Analysis on Bank Marketing Dataset :
#
# ### Bank Marketing Dataset contains both type variables 'Categorical' and 'Numerical'.
#
# ### Categorical Variable :
#
# * Marital - (Married, Single, Divorced)
# * Job - (Management,BlueCollar,Technician,entrepreneur,retired,admin.,services,selfemployed,housemaid,student,unemployed,unknown)
# * Contact - (Telephone,Cellular,Unknown)
# * Education - (Primary,Secondary,Tertiary,Unknown)
# * Month - (Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec)
# * Poutcome - (Success,Failure,Other,Unknown)
# * Housing - (Yes/No)
# * Loan - (Yes/No)
# * deposit - (Yes/No)
# * Default - (Yes/No)
#
# ### Numerical Variable:
# * Age
# * Balance
# * Day
# * Duration
# * Campaign
# * Pdays
# * Previous
#
#
#
#
# In[ ]:
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn import metrics as m
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import roc_auc_score
data = pd.read_csv("../../../input/rouseguy_bankbalanced/bank.csv",sep=',',header='infer')
data = data.drop(['day','poutcome'],axis=1)
def binaryType_(data):
data.deposit.replace(('yes', 'no'), (1, 0), inplace=True)
data.default.replace(('yes','no'),(1,0),inplace=True)
data.housing.replace(('yes','no'),(1,0),inplace=True)
data.loan.replace(('yes','no'),(1,0),inplace=True)
#data.marital.replace(('married','single','divorced'),(1,2,3),inplace=True)
data.contact.replace(('telephone','cellular','unknown'),(1,2,3),inplace=True)
data.month.replace(('jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec'),(1,2,3,4,5,6,7,8,9,10,11,12),inplace=True)
#data.education.replace(('primary','secondary','tertiary','unknown'),(1,2,3,4),inplace=True)
return data
data = binaryType_(data)
# for i in range(len(data.marital.unique())):
# data["marital_"+str(data.marital.unique()[i])] = (data.marital == data.marital.unique()[i]).astype(int)
# for j in range(len(data.job.unique())):
# data["job_"+str(data.job.unique()[j])] = (data.job == data.job.unique()[j]).astype(int)
# for k in range(len(data.contact.unique())):
# data["contact_"+str(data.contact.unique()[k])] = (data.contact == data.contact.unique()[k]).astype(int)
# for l in range(len(data.education.unique())):
# data['education_'+str(data.education.unique()[l])] = (data.education == data.education.unique()[l]).astype(int)
# for n in range(len(data.month.unique())):
# data['month_'+str(data.month.unique()[n])] = (data.month == data.month.unique()[n]).astype(int)
#print(data.is_success.value_counts())
#print(data.describe())
#print(data.head())
# ### Outliers:
# A data point greater than Q3 * 1.5 is treated as an outlier here, where Q3 is the 75% quantile (see the quick check after this list).
#
# ### Age:
# * Average age of the people in the dataset is ~41 with a std of 10.61
# * Min. age is 18
# * Max. age is 95
# * The 75% quantile (percentile) shows that 75% of the people are aged 49 or less.
# * As the max is 95, it is very likely an outlier: 49 * (3/2) = 73.5, so anything greater than 73.5 is an outlier.
#
# ### Balance:
# * Average balance is approximately 1528.53 with a std of 3255.41; such a large standard deviation means balance is widely spread across the dataset.
# * Min. balance is -6847
# * Max. balance is 81204
# * The 75% quantile shows that 75% of the people have a balance of 1708 or less.
# * Compared with the 75% quantile, 81204 is very large and is an outlier data point.
#
# ### Duration:
# * Average call duration is approximately 371 seconds with a std of 347; again, the large standard deviation means duration is widely spread across the dataset.
# * Min. duration is 2
# * Max. duration is 3881
# * The 75% quantile shows that 75% of the people spoke for 496 seconds or less.
# * Compared with the 75% quantile, 3881 is an outlier data point.
#
# ### Pdays:
# * Average number of days passed since the client was contacted in a previous campaign is approximately 51.33 with a std of 108.75.
# * Min. pdays is -1
# * Max. pdays is 854
# * The 75% quantile is 20.75 days, which means the clients were contacted frequently.
#
# ### Campaign:
# * Average number of contacts made to a client during the current campaign is approximately 2.50 with a std of 2.72.
# * Min. campaign is 1
# * Max. campaign is 63
# * The 75% quantile shows that 75% of the clients were contacted 3 times or fewer in the current campaign.
# * Compared with the 75% quantile, 63 is an outlier data point.
#
# ### Previous:
# * Average number of contacts made to a client before this campaign is approximately 0.83 with a std of 2.29.
# * Min. previous is 0.
# * Max. previous is 58
# * The 75% quantile shows that 75% of the clients were contacted at most once before this campaign.
# * Compared with the 75% quantile, 58 is an outlier data point.
#
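# In[ ]:
# Illustrative check (added; not part of the original analysis): the summary
# statistics and the Q3 * 1.5 thresholds quoted above can be reproduced directly
# from `data` with pandas.
numeric_cols = ['age', 'balance', 'duration', 'campaign', 'pdays', 'previous']
print(data[numeric_cols].describe())
print(data[numeric_cols].quantile(0.75) * 1.5)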
# In[ ]:
plt.hist((data.duration),bins=100)
print()
# In[ ]:
plt.hist(data.age,bins=10)
print()
# In[ ]:
plt.hist(data.balance,bins=1000)
print()
# **All of the histograms above show that the data is skewed (most of the mass sits on the left), i.e. the presence of skewness means we need to sample the data carefully when splitting it into train_data and test_data!**
# In[ ]:
fig = plt.figure(1, figsize=(9, 6))
ax1 = fig.add_subplot(211)
bp1 = ax1.boxplot(data.balance,0,'')
ax2 = fig.add_subplot(212)
bp2 = ax2.boxplot(data.balance,0,'gD')
print()
# In[ ]:
fig = plt.figure(1, figsize=(6, 6))
ax = fig.add_subplot(211)
bp = ax.boxplot(data.age,0,'')
ax = fig.add_subplot(212)
bp = ax.boxplot(data.age,0,'gD')
print()
# In[ ]:
fig = plt.figure(1, figsize=(9, 6))
ax1 = fig.add_subplot(211)
bp1 = ax1.boxplot(data.duration,0,'')
ax2 = fig.add_subplot(212)
bp2 = ax2.boxplot(data.duration,0,'gD')
print()
#
# The boxplots above show how the data is spread across the dataset.
# **Many data points lie above 1.5 times the 3rd quartile, i.e. from a theoretical standpoint a large share of the data points qualify as outliers.**
# In[ ]:
draw_data = pd.crosstab(data.housing, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# In[ ]:
draw_data = pd.crosstab(data.default, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# In[ ]:
draw_data = pd.crosstab(data.loan, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# Looking at the bar charts of each feature against the label, the data is widely spread, i.e. we cannot predict the outcome from any single feature alone.
# **Feature Engineering**
# * First, we can convert the duration from seconds to minutes and then turn it into a categorical feature.
# * Convert the age of the person into a categorical feature by segregating it into Adult, Middle Aged and Old.
# * Similarly, we can convert the other continuous features into discrete ones (see the illustrative sketch below).
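# In[ ]:
# Illustrative sketch (added; not part of the original notebook): pandas.cut is a
# more compact way to bin a continuous column than the manual .loc assignments
# defined below, e.g. binning age into the same three groups.
age_groups = pd.cut(data['age'], bins=[17, 35, 60, 200], labels=['Adult', 'Middle_Aged', 'old'])
print(age_groups.value_counts())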
# In[ ]:
#data['duration'] = data['duration']/60
def age_(data):
data['Adult'] = 0
data['Middle_Aged'] = 0
data['old'] = 0
data.loc[(data['age'] <= 35) & (data['age'] >= 18),'Adult'] = 1
data.loc[(data['age'] <= 60) & (data['age'] >= 36),'Middle_Aged'] = 1
#data.loc[(data['age'] <= 60) & (data['age'] >= 46),'Elderly'] = 1
data.loc[data['age'] >=61,'old'] = 1
return data
def campaign_(data):
data.loc[data['campaign'] == 1,'campaign'] = 1
data.loc[(data['campaign'] >= 2) & (data['campaign'] <= 3),'campaign'] = 2
data.loc[data['campaign'] >= 4,'campaign'] = 3
return data
def duration_(data):
data['t_min'] = 0
data['t_e_min'] = 0
data['e_min']=0
data.loc[data['duration'] <= 5,'t_min'] = 1
data.loc[(data['duration'] > 5) & (data['duration'] <= 10),'t_e_min'] = 1
data.loc[data['duration'] > 10,'e_min'] = 1
return data
def pdays_(data):
data['pdays_not_contacted'] = 0
data['months_passed'] = 0
data.loc[data['pdays'] == -1 ,'pdays_not_contacted'] = 1
data['months_passed'] = data['pdays']/30
data.loc[(data['months_passed'] >= 0) & (data['months_passed'] <=2) ,'months_passed'] = 1
data.loc[(data['months_passed'] > 2) & (data['months_passed'] <=6),'months_passed'] = 2
data.loc[data['months_passed'] > 6 ,'months_passed'] = 3
return data
def previous_(data):
data['Not_Contacted'] = 0
data['Contacted'] = 0
data.loc[data['previous'] == 0 ,'Not_Contacted'] = 1
data.loc[(data['previous'] >= 1) & (data['pdays'] <=99) ,'Contacted'] = 1
data.loc[data['previous'] >= 100,'Contacted'] = 2
return data
def balance_(data):
data['Neg_Balance'] = 0
data['No_Balance'] = 0
data['Pos_Balance'] = 0
    data.loc[data['balance'] < 0, 'Neg_Balance'] = 1
data.loc[data['balance'] == 0,'No_Balance'] = 1
data.loc[(data['balance'] >= 1) & (data['balance'] <= 100),'Pos_Balance'] = 1
data.loc[(data['balance'] >= 101) & (data['balance'] <= 500),'Pos_Balance'] = 2
data.loc[(data['balance'] >= 501) & (data['balance'] <= 2000),'Pos_Balance'] = 3
data.loc[(data['balance'] >= 2001) & (data['balance'] <= 10000),'Pos_Balance'] = 4
data.loc[data['balance'] >= 10001,'Pos_Balance'] = 5
return data
def job_(data):
data.loc[data['job'] == "management",'job'] = 1
data.loc[data['job'] == "technician",'job'] = 2
data.loc[data['job'] == "entrepreneur",'job'] = 3
data.loc[data['job'] == "blue-collar",'job'] = 4
data.loc[data['job'] == "retired",'job'] = 5
data.loc[data['job'] == "admin.",'job'] = 6
data.loc[data['job'] == "services",'job'] = 7
data.loc[data['job'] == "self-employed",'job'] = 8
data.loc[data['job'] == "unemployed",'job'] = 9
data.loc[data['job'] == "student",'job'] = 10
data.loc[data['job'] == "housemaid",'job'] = 11
data.loc[data['job'] == "unknown",'job'] = 12
return data
def marital_(data):
data['married'] = 0
data['singles'] = 0
data['divorced'] = 0
data.loc[data['marital'] == 'married','married'] = 1
    data.loc[data['marital'] == 'single', 'singles'] = 1
data.loc[data['marital'] == 'divorced','divorced'] = 1
return data
def education_(data):
data['primary'] = 0
data['secondary'] = 0
data['tertiary'] = 0
data['unknown'] = 0
data.loc[data['education'] == 'primary','primary'] = 1
data.loc[data['education'] == 'secondary','secondary'] = 1
data.loc[data['education'] == 'tertiary','tertiary'] = 1
data.loc[data['education'] == 'unknown','unknown'] = 1
return data
data = campaign_(data)
data = age_(data)
data = education_(data)
data = balance_(data)
data = job_(data)
data = previous_(data)
data = duration_(data)
data = pdays_(data)
data = marital_(data)
print(data.columns)
# print(data.balance.value_counts())
# print(data.duration.value_counts())
# print(data.pdays.value_counts())
# print(data.campaign.value_counts())
# print(data.age.value_counts())
# **Plotting bar chart :**
#
# **data.Adult vs data.deposit :**
#
# The data is split roughly equally between clients opting for the term deposit and those who do not.
#
# **data.Middle_Aged vs data.deposit :**
#
# The data points out that middle-aged people opt for the term deposit less often.
#
# **data.old vs data.deposit :**
#
# The data points out that older people opt for the term deposit more often, as this group includes those who are retired.
#
# **data.t_min vs data.deposit :**
#
# The data brings out the fact that if the client is less interested in enrolling for the term deposit, he/she spends less time on the call with the agent.
#
# Note : t_min - Five minutes or less
#
# **data.t_e_min vs data.deposit :**
#
# The data brings out the fact that if the client is interested in enrolling for the term deposit, he/she is willing to spend between five and ten minutes on the call with the agent.
#
# Note : t_e_min - more than five and up to ten minutes
#
# **data.e_min vs data.deposit :**
#
# The data suggests that if the client is very interested in enrolling for the term deposit, he/she is willing to spend more than ten minutes on the call with the agent.
#
# Note : e_min - more than ten minutes
#
# **data.pdays_not_contacted vs data.deposit :**
#
# These data points refer to clients who were not contacted in the previous campaign. It appears that most people contacted in the current campaign were not contacted previously.
#
# **data.months_passed vs data.deposit :**
#
# These data points refer to the number of months passed since the client was last contacted before the current campaign.
#
# **data.Contacted vs data.deposit :**
#
# These data points refer to the number of times a client was contacted before this campaign. Clients with fewer contacts are more likely to enroll for the term deposit.
#
# **data.not_Contacted vs data.deposit :**
#
# These data points refer to clients for whom no contact was made before this campaign. Clients who were never contacted are less likely to enroll for the term deposit.
#
# **data.Pos_Balance vs data.deposit :**
#
# Here, we can clearly see that as the account balance increases, more and more clients enroll for the term deposit.
#
# **data.No_Balance vs data.deposit :**
#
# Here, we can see that when the account balance is zero, fewer clients enroll for the term deposit.
#
# **data.Neg_Balance vs data.deposit :**
#
# We can infer that when the account balance is negative, very few clients enroll for the term deposit, so this feature helps when classifying such data points.
#
# **data.campaign vs data.deposit :**
#
# These data points refer to the number of contacts made to a client in this campaign. Clients contacted once or twice are more likely to enroll than clients contacted three or more times.
# In[ ]:
draw_data = pd.crosstab(data.Adult, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# In[ ]:
draw_data = pd.crosstab(data.Middle_Aged, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# In[ ]:
draw_data = pd.crosstab(data.old, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# In[ ]:
draw_data = pd.crosstab(data.t_min, data.deposit)
draw_data.div(draw_data.sum(1).astype(float), axis=0).plot(kind='bar', stacked=False, color=['deepskyblue','steelblue'],grid=False, figsize=(15, 5))
print()
# In[ ]:
draw_data = pd.crosstab(data.t_e_min, data.deposit)
#!/usr/bin/env python
r"""Aggregate, create, and save spiral plots.
"""
import pdb # noqa: F401
import logging
import numpy as np
import pandas as pd
import matplotlib as mpl
from datetime import datetime
from numbers import Number
from collections import namedtuple
from numba import njit, prange
from matplotlib import pyplot as plt
from . import base
from . import labels as labels_module
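# Illustrative usage (a sketch inferred from the classes below; the variable names
# and threshold values are placeholders, not taken from the original module):
#
#     mesh = SpiralMesh(x, y, initial_xedges, initial_yedges, min_per_bin=250)
#     mesh.set_cell_filter_thresholds(density=0.01, size=0.99)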
InitialSpiralEdges = namedtuple("InitialSpiralEdges", "x,y")
# SpiralMeshData = namedtuple("SpiralMeshData", "x,y")
SpiralMeshBinID = namedtuple("SpiralMeshBinID", "id,fill,visited")
SpiralFilterThresholds = namedtuple(
"SpiralFilterThresholds", "density,size", defaults=(False,)
)
@njit(parallel=True)
def get_counts_per_bin(bins, x, y):
nbins = bins.shape[0]
cell_count = np.full(nbins, 0, dtype=np.int64)
for i in prange(nbins):
x0, x1, y0, y1 = bins[i]
left = x >= x0
right = x < x1
bottom = y >= y0
top = y < y1
chk_cell = left & right & bottom & top
cell_count[i] = chk_cell.sum()
return cell_count
@njit(parallel=True)
def calculate_bin_number_with_numba(mesh, x, y):
fill = -9999
zbin = np.full(x.size, fill, dtype=np.int64)
nbins = mesh.shape[0]
bin_visited = np.zeros(nbins, dtype=np.int64)
for i in prange(nbins):
x0, x1, y0, y1 = mesh[i]
# Assume that largest x- and y-edges are extended by larger of 1% and 0.01
# so that we can just naively use < instead of a special case of <=.
# At time of writing (20200418), `SpiralPlot.initialize_mesh` did this.
tk = (x >= x0) & (x < x1) & (y >= y0) & (y < y1)
zbin[tk] = i
bin_visited[i] += 1
return zbin, fill, bin_visited
class SpiralMesh(object):
def __init__(self, x, y, initial_xedges, initial_yedges, min_per_bin=250):
self.set_data(x, y)
self.set_min_per_bin(min_per_bin)
self.set_initial_edges(initial_xedges, initial_yedges)
self._cell_filter_thresholds = SpiralFilterThresholds(density=False, size=False)
@property
def bin_id(self):
return self._bin_id
@property
def cat(self):
r""":py:class:`pd.Categorical` version of `bin_id`, with fill bin removed."""
return self._cat
@property
def data(self):
return self._data
@property
def initial_edges(self):
return self._initial_edges
@property
def mesh(self):
return self._mesh
@property
def min_per_bin(self):
return self._min_per_bin
@property
def cell_filter_thresholds(self):
return self._cell_filter_thresholds
@property
def cell_filter(self):
r"""Build a boolean :py:class:`Series` selecting mesh cells that meet
density and area criteria specified by `mesh_cell_filter_thresholds`.
Notes
----
Neither `density` nor `size` convert log-scale edges into linear scale.
Doing so would overweight the area of mesh cells at larger values on a given axis.
"""
density = self.cell_filter_thresholds.density
size = self.cell_filter_thresholds.size
x = self.mesh[:, [0, 1]]
y = self.mesh[:, [2, 3]]
dx = x[:, 1] - x[:, 0]
dy = y[:, 1] - y[:, 0]
dA = dx * dy
tk = np.full_like(dx, True, dtype=bool)
if size:
size_quantile = np.quantile(dA, size)
tk_size = dA < size_quantile
tk = tk & (tk_size)
if density:
cnt = np.bincount(self.bin_id.id, minlength=self.mesh.shape[0])
assert cnt.shape == tk.shape
cell_density = cnt / dA
density_quantile = np.quantile(cell_density, density)
tk_density = cell_density > density_quantile
tk = tk & tk_density
return tk
def set_cell_filter_thresholds(self, **kwargs):
r"""Set or update the :py:meth:`mesh_cell_filter_thresholds`.
Parameters
----------
density: scalar
The density quantile above which we want to select bins, e.g.
above the 0.01 quantile. This ensures that each bin meets some
sufficient fill factor.
size: scalar
The size quantile below which we want to select bins, e.g.
below the 0.99 quantile. This ensures that the bin isn't so large
that it will appear as an outlier.
"""
density = kwargs.pop("density", False)
size = kwargs.pop("size", False)
if len(kwargs.keys()):
extra = "\n".join(["{}: {}".format(k, v) for k, v in kwargs.items()])
raise KeyError("Unexpected kwarg\n{}".format(extra))
self._cell_filter_thresholds = SpiralFilterThresholds(
density=density, size=size
)
def set_initial_edges(self, xedges, yedges):
self._initial_edges = InitialSpiralEdges(xedges, yedges)
def set_data(self, x, y):
data = pd.concat({"x": x, "y": y}, axis=1)
self._data = data # SpiralMeshData(x, y)
def set_min_per_bin(self, new):
self._min_per_bin = int(new)
def initialize_bins(self):
# Leaves initial edges altered when we change maximum edge.
xbins = self.initial_edges.x
ybins = self.initial_edges.y
# # Account for highest bin = 0 already done in `SpiralPlot2D.initialize_mesh`.
# xbins[-1] = np.max([0.01, 1.01 * xbins[-1]])
# ybins[-1] = np.max([0.01, 1.01 * ybins[-1]])
left = xbins[:-1]
right = xbins[1:]
bottom = ybins[:-1]
top = ybins[1:]
nx = left.size
ny = bottom.size
mesh = np.full((nx * ny, 4), np.nan, dtype=np.float64)
for x0, x1, i in zip(left, right, range(nx)):
for y0, y1, j in zip(bottom, top, range(ny)):
# NOTE: i*ny+j means go to i'th row, which has
# nrow * number of bins passed. Then go
# to j'th bin because we have to traverse
# to the j'th y-bin too.
mesh[(i * ny) + j] = [x0, x1, y0, y1]
mesh = np.array(mesh)
# pdb.set_trace()
self.initial_mesh = np.array(mesh)
return mesh
@staticmethod
def process_one_spiral_step(bins, x, y, min_per_bin):
# print("Processing spiral step", flush=True)
# start0 = datetime.now()
cell_count = get_counts_per_bin(bins, x, y)
bins_to_replace = cell_count > min_per_bin
nbins_to_replace = bins_to_replace.sum()
if not nbins_to_replace:
return None, 0
xhyh = 0.5 * (bins[:, [0, 2]] + bins[:, [1, 3]])
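        # (xh, yh) midpoints of each cell; over-full cells are split into four quadrants about these points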
def split_this_cell(idx):
x0, x1, y0, y1 = bins[idx]
xh, yh = xhyh[idx]
# Reduce calls to `np.array`.
# Just return a list here.
split_cell = [
[x0, xh, y0, yh],
[xh, x1, y0, yh],
[xh, x1, yh, y1],
[x0, xh, yh, y1],
]
return split_cell
new_cells = bins_to_replace.sum() * [None]
for i, idx in enumerate(np.where(bins_to_replace)[0]):
new_cells[i] = split_this_cell(idx)
new_cells = np.vstack(new_cells)
bins[bins_to_replace] = np.nan
# stop = datetime.now()
# print(f"Done Building replacement grid cells (dt={stop-start1})", flush=True)
# print(f"Done Processing spiral step (dt={stop-start0})", flush=True)
return new_cells, nbins_to_replace
@staticmethod
def _visualize_logged_stats(stats_str):
from matplotlib import pyplot as plt
stats = [[y.strip() for y in x.split(" ") if y] for x in stats_str.split("\n")]
stats.pop(1) # Remove column underline row
stats = np.array(stats)
index = pd.Index(stats[1:, 0].astype(int), name="Step")
n_replaced = stats[1:, 1].astype(int)
dt = pd.to_timedelta(stats[1:, 2]).total_seconds()
dt_unit = "s"
if dt.max() > 60:
dt /= 60
dt_unit = "m"
if dt.max() > 60:
dt /= 60
dt_unit = "H"
if dt.max() > 24:
dt /= 24
dt_unit = "D"
dt_key = f"Elapsed [{dt_unit}]"
        stats = pd.DataFrame({dt_key: dt, "N Divisions": n_replaced}, index=index)
import pandas as pd
import os.path
frames=[]
sheet=[0,1,1,2,6,6,8,4,4,8,8,8,9,9,1,2,1,4,6,4,10,34,34,8,1,34,34,34,34,7]
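# Number of CSV parts expected per sector; input files are named SektorRiil<sector>_<part>.csv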
total=0
for i in range(1,len(sheet)):
for j in range(1,sheet[i]+1):
total+=1
print (total)
now=0.0
for i in range(1,len(sheet)):
for j in range(1,sheet[i]+1):
if os.path.isfile('SektorRiil%d_%d.csv'%(i,j))==False:
continue
df=pd.read_csv('SektorRiil%d_%d.csv'%(i,j), header=0)
df.columns = ['grafik','nama','date','value','frekuensi','satuan']
df=df.dropna()
now+=1
print (now/total*100, '% completed')
frames.append(df)
fo = open("joinRiilProgress.txt", "w")
fo.truncate(0)
fo.seek(0)
fo.write("%s"%(now/total*100))
fo.close()
result = pd.concat(frames, ignore_index=True, axis=0)
# external libraries
import pandas as pd
import numpy as np
from collections import Counter
from ast import literal_eval
import time
import sys
from shutil import copyfile
# tensorflow and keras
import keras.optimizers
from keras.datasets import imdb
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Concatenate
from keras.layers import GRU
from keras.layers.embeddings import Embedding
from keras.constraints import maxnorm
from keras.regularizers import L1L2
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
# fix random seed for reproducibility - only works for CPU version of tensorflow
np.random.seed(42)
# our libraries
import preprocess
import visualise
train = pd.read_csv('../../../data/processed/tok_phase1_movie_reviews-train_train80.csv')
validate = pd.read_csv('../../../data/processed/tok_phase1_movie_reviews-train_validate10.csv')
test = pd.read_csv('../../../data/processed/tok_phase1_movie-reviews-train_test10.csv')
# train = train.head(4500)
print("\nFiles read, converting tokens to lists.")
for frame in [train, validate, test]:
for col in ['summary_tokens', 'review_tokens']:
frame[col] = frame[col].map(literal_eval)
### Preprocessing
# declare the padding and unknown symbols
pad_mask_int = 0
pad_mask_sym = '==pad_mask=='
unknown_int = 1
unknown_sym = '==unknown_sym=='
# vocabulary set
vocab_counter = Counter()
for doc in train['summary_tokens']:
vocab_counter.update(doc)
for doc in train['review_tokens']:
vocab_counter.update(doc)
min_times_word_used = 2  # keeping only words seen at least twice leaves some "unknown" tokens in training, so the model is prepared for unknown words in the test sets
print(len(vocab_counter),
"tokens before discarding those that appear less than {} times.".format(min_times_word_used))
for key in list(vocab_counter.keys()):
if vocab_counter[key] < min_times_word_used:
vocab_counter.pop(key)
print(len(vocab_counter),
"tokens after discarding those that appear less than {} times.".format(min_times_word_used))
vocab_set = set(vocab_counter.keys())
# vocabulary list and int map
vocab_list = [pad_mask_sym, unknown_sym] + sorted(vocab_set)
vocab_map = {word: index for index, word in enumerate(vocab_list)}
# label set
label_set = set(train['polarity'].unique())
# label list and int map
label_list = sorted(label_set)
label_map = {word: index for index, word in enumerate(label_list)}
# create one-hot sparse matrix of labels
y_train = preprocess.create_one_hot(train['polarity'], label_map)
y_validate = preprocess.create_one_hot(validate['polarity'], label_map)
y_test = preprocess.create_one_hot(test['polarity'], label_map)
# replace strings with ints (tokenization is done on the Series fed to word_index())
train_summary = preprocess.word_index(train['summary_tokens'], vocab_map, unknown_int)
train_review = preprocess.word_index(train['review_tokens'], vocab_map, unknown_int)
validate_summary = preprocess.word_index(validate['summary_tokens'], vocab_map, unknown_int)
validate_review = preprocess.word_index(validate['review_tokens'], vocab_map, unknown_int)
test_summary = preprocess.word_index(test['summary_tokens'], vocab_map, unknown_int)
test_review = preprocess.word_index(test['review_tokens'], vocab_map, unknown_int)
# pad / truncate
from keras.preprocessing.sequence import pad_sequences
summary_len = max(map(len, list(train['summary_tokens'])))
review_len = 500
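# Summaries are padded to the length of the longest training summary; reviews are
# padded/truncated to a fixed 500 tokens.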
train_summary = pad_sequences(sequences=train_summary,
maxlen=summary_len,
dtype='int32',
padding='pre',
value=pad_mask_int)
train_review = pad_sequences(sequences=train_review,
maxlen=review_len,
dtype='int32',
padding='pre',
truncating='pre',
value=pad_mask_int)
validate_summary = pad_sequences(sequences=validate_summary,
maxlen=summary_len,
dtype='int32',
padding='pre',
value=pad_mask_int)
validate_review = pad_sequences(sequences=validate_review,
maxlen=review_len,
dtype='int32',
padding='pre',
truncating='pre',
value=pad_mask_int)
test_summary = pad_sequences(sequences=test_summary,
maxlen=summary_len,
dtype='int32',
padding='pre',
value=pad_mask_int)
test_review = pad_sequences(sequences=test_review,
maxlen=review_len,
dtype='int32',
padding='pre',
truncating='pre',
value=pad_mask_int)
np.concatenate([y_validate.todense(), y_test.todense()])
# pretrained embeddings are from https://nlp.stanford.edu/projects/glove/
# start by loading in the embedding matrix
# load the whole embedding into memory
print("\nReading big ol' word embeddings")
embeddings_index = dict()
with open('../../../data/external/glove.42B.300d.txt') as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
print('Loaded %s word vectors.' % len(embeddings_index))
embedding_dim = 300
# create a weight matrix for words in training docs
embedding_matrix = np.zeros((len(vocab_list), embedding_dim))
count = 0
for i, word in enumerate(vocab_list):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
else:
count += 1
# print(word)
pass # maybe we should use fuzzywuzzy to get vector of nearest word? Instead of all zeros
print("Failed to find {} out of {} tokens.".format(count, len(vocab_list)))
del embeddings_index
# Name run for tensorboard
NAME = 'rerun_final_GRU_{}'.format(time.strftime('%y%m%d_%H%M', time.localtime(time.time())))
# copy this .py to the Tensorboard logs folder
copyfile(sys.argv[0], './tb_logs/{}.py'.format(NAME))
# Keras functional API for joined model
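# Two inputs (summary and review token sequences) share a single embedding layer,
# feed two separate GRUs, and are concatenated before the softmax output layer.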
input_s = Input(shape=(summary_len,), dtype='int32', name='input_s')
input_r = Input(shape=(review_len,), dtype='int32', name='input_r')
embedding_vector_length = embedding_dim
GRU_nodes_summary = 64
GRU_nodes_review = 100
emb = Embedding(len(vocab_list), embedding_vector_length, mask_zero=True,
weights=[embedding_matrix], trainable=False)
emb_s = emb(input_s)
emb_r = emb(input_r)
gru_s = GRU(GRU_nodes_summary, activation='tanh', recurrent_activation='sigmoid', dropout=0.3,
recurrent_dropout=0.4, kernel_constraint=maxnorm(3), recurrent_constraint=maxnorm(3),
unroll=True,
use_bias=True, kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal', bias_initializer='zeros',
kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=L1L2(l1=0.1, l2=0.0),
activity_regularizer=None,
bias_constraint=None, implementation=1, return_sequences=False, return_state=False,
go_backwards=False, stateful=False, reset_after=False)(emb_s)
gru_r = GRU(GRU_nodes_review, activation='tanh', recurrent_activation='sigmoid', dropout=0.3,
recurrent_dropout=0.4, unroll=True,
kernel_constraint=None, recurrent_constraint=None,
use_bias=True, kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal', bias_initializer='zeros',
kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=L1L2(l1=0.1, l2=0.0),
activity_regularizer=None,
bias_constraint=None, implementation=1, return_sequences=False, return_state=False,
go_backwards=False, stateful=False, reset_after=False)(emb_r)
concat = Concatenate()([gru_s, gru_r])
output = Dense(len(label_set), activation='softmax')(concat)
model = Model([input_s, input_r], output)
nadam1 = keras.optimizers.nadam(lr=0.0003)
model.compile(loss='categorical_crossentropy', optimizer=nadam1, metrics=['accuracy'])
# unfrozen embeddings
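# Same layers as `model`, but with the embedding weights trainable and a 10x lower
# learning rate for the fine-tuning phase.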
emb.trainable = True
thawn = Model([input_s, input_r], output)
nadam2 = keras.optimizers.nadam(lr=0.00003)
thawn.compile(loss='categorical_crossentropy', optimizer=nadam2, metrics=['accuracy'])
print(model.summary())
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
tensorboard = TensorBoard(log_dir = './tb_logs/{}'.format(NAME))
hist1 = model.fit(x=[train_summary, train_review],
y=y_train,
validation_data=([validate_summary, validate_review],
y_validate),
epochs=50, batch_size=64, callbacks=[es, tensorboard])
hist2 = thawn.fit(x=[train_summary, train_review],
y=y_train,
validation_data=([validate_summary, validate_review],
y_validate),
epochs=50, batch_size=64, callbacks=[es, tensorboard])
visualise.plot_both_results(hist1.history['loss'] + hist2.history['loss'],
hist1.history['acc'] + hist2.history['acc'],
hist1.history['val_loss'] + hist2.history['val_loss'],
hist1.history['val_acc'] + hist2.history['val_acc'],
"History", '../reports/figures/GRU_summary_final_phase3_hist.svg')
# visualise.plot_results(hist1.history['val_loss'] + hist2.history['val_loss'],
# hist1.history['val_acc'] + hist2.history['val_acc'],
# "Validation history", '../reports/figures/GRU_summary_validation_hist.svg')
# Predict for validation data
# y_pred = thawn.predict([validate_summary, validate_review])
# Undo one-hot
# y_pred = preprocess.undo_one_hot(y_pred, label_list)
# y_orig = validate['polarity']
# visualise.plot_confusion(y_orig, y_pred, label_list) # yeah, need to fix so figure is saved instead
# Score for validation
print("Validation score")
print(thawn.evaluate([validate_summary, validate_review], y_validate))
# Predict for test data
# y_pred = thawn.predict([validate_summary, validate_review])
# Undo one-hot
# y_pred = preprocess.undo_one_hot(y_pred, label_list)
# y_orig = test['polarity']
# visualise.plot_confusion(y_orig, y_pred, label_list) # yeah, need to fix so figure is saved instead
# Score for test
print("Test score")
print(thawn.evaluate([test_summary, test_review], y_test))
# Refine weights with validation and training
hist3 = thawn.fit(x=[np.concatenate([validate_summary, test_summary]),
np.concatenate([validate_review, test_review])],
y=np.concatenate([y_validate.todense(), y_test.todense()]),
epochs=2, batch_size=128, callbacks=[tensorboard])
#print("\nChecking for weights that have gone NaN (that's a bad thing):")
#for weight in model.get_weights():
# df = pd.DataFrame(weight)
# print(df[df.isnull().any(axis=1)])
thawn.save('../models/final_phase1.h5')
movie = pd.read_csv('../../../data/processed/tok_phase1-movie-hidden.csv')
games = pd.read_csv('../../../data/processed/tok_phase1-games-hidden.csv')
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
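    # Element-wise comparison that treats a pair of NaNs as equal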
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
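    # Worked example for the first order below: with slippage 0.1 the fill price
    # becomes 10 * 1.1 = 11; solving size * 11 * (1 + 0.1) + 1 = 100 for the full
    # cash balance gives size ~= 8.1818, and fees = 0.1 * (8.1818 * 11) + 1 ~= 10.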
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
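# Note on test_max_orders/test_max_logs above (and their TestFromOrders analogues):
# max_orders/max_logs cap the preallocated record arrays, so passing a value smaller than
# the number of records the simulation actually produces raises an exception.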
# ############# from_holding ############# #
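# from_holding models a simple buy-and-hold portfolio. As the test below asserts, its
# order records match from_signals with a single entry at the first bar, no exits and
# accumulate=False. A sketch of that equivalence (illustrative only; helper name is ours):
def _sketch_from_holding_equivalence():
    held = vbt.Portfolio.from_holding(price)
    signaled = vbt.Portfolio.from_signals(price, True, False, accumulate=False)
    return held.order_records, signaled.order_records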
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
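# from_random_signals generates the entry/exit masks itself, either as a fixed number of
# signals per column (`n`) or with a per-bar probability (`prob`); the tests below check
# the result against an explicit from_signals call with the equivalent boolean arrays.
# A minimal sketch of both parameterizations (illustrative only; `seed` is the
# module-level fixture and the helper name is ours):
def _sketch_from_random_signals():
    by_count = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
    by_prob = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
    return by_count, by_prob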
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
def from_orders_longonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
def from_orders_shortonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
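# The from_orders wrappers above mirror the from_signals ones: they pin `direction` and
# forward the rest to Portfolio.from_orders. In `order_size`, judging from test_one_column
# below, np.inf spends all available cash, -np.inf closes the whole position (and opens a
# short under 'all'), and np.nan places no order at that bar. A minimal sketch of the
# wrapped call (illustrative only, not a test; helper name is ours):
def _sketch_from_orders_size():
    # Assumes the module-level `price` and `order_size` fixtures defined above.
    return vbt.Portfolio.from_orders(price, order_size, direction='all')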
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_all(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
(2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
(2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
(9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
(3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
(9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
(6, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
(6, 4, 1, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 1, 0, 0, 0.0, 100.0, 2.0, 200.0, -np.inf, 0, 2, 2.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 2, 0, 0, 400.0, -100.0, 3.0, 100.0, np.nan, 0, 2, 3.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 3, 0, 0, 400.0, -100.0, 4.0, 0.0, np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 4, 0, 0, 0.0, 0.0, 5.0, 0.0, -np.inf, 0, 2, 5.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 1, 0, 200.0, 2.0, 0.0, 1),
(3, 3, 1, 200.0, 4.0, 0.0, 0), (4, 4, 1, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 3, 1, 100.0, 4.0, 0.0, 0),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_orders_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_target_shares(self):
record_arrays_close(
from_orders_all(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=75., size_type='targetshares',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
(8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
(4, 4, 0, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
(2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
(6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
(10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
(12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
(9, 4, 1, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
(3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
(2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
(6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
(10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
(12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
(14, 4, 2, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
@njit
def order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col])
@njit
def log_order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col], log=True)
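# from_order_func drives the simulation with a user-supplied Numba function that is called
# per element and returns an order built with nb.create_order_nb; the two functions above
# simply alternate between buying and selling `size` at the current close, with
# log_order_func_nb additionally writing a log record. A minimal sketch of how they are
# invoked (illustrative only; mirrors the calls in the tests below, helper name is ours):
def _sketch_from_order_func():
    return vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=False)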
class TestFromOrderFunc:
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_one_column(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_multiple_columns(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
(6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_group_by(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_cash_sharing(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
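        # call_seq holds, per row and group, the order in which columns are processed within the group;
        # 'reversed' flips the default order and 'random' shuffles it reproducibly via the seed.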
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 1, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 1, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 2, 200.0, 2.0, 0.0, 1),
(4, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(4, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (5, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
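        # call_seq='auto' is rejected above, so the automatic ordering is emulated below by
        # sorting the call sequence inside segment_prep_func_nb via nb.sort_call_seq_nb.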
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def segment_prep_func_nb(sc, target_hold_value):
order_size = np.copy(target_hold_value[sc.i, sc.from_col:sc.to_col])
order_size_type = np.full(sc.group_len, SizeType.TargetValue)
direction = np.full(sc.group_len, Direction.All)
order_value_out = np.empty(sc.group_len, dtype=np.float_)
sc.last_val_price[sc.from_col:sc.to_col] = sc.close[sc.i, sc.from_col:sc.to_col]
nb.sort_call_seq_nb(sc, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(oc, order_size, order_size_type, direction):
col_i = oc.call_seq_now[oc.call_idx]
return nb.create_order_nb(
size=order_size[col_i],
size_type=order_size_type[col_i],
price=oc.close[oc.i, col_i],
direction=direction[col_i]
)
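        # With a constant close of 1.0, target values translate directly into share counts,
        # so holding_value per column should reproduce target_hold_value exactly.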
portfolio = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
@njit
def target_val_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_val_order_func_nb(oc):
return nb.create_order_nb(size=50., size_type=SizeType.TargetValue, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_percent(self, test_row_wise):
@njit
def target_pct_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_pct_order_func_nb(oc):
return nb.create_order_nb(size=0.5, size_type=SizeType.TargetPercent, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
segment_prep_func_nb=target_pct_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(portfolio._init_cash) == np.ndarray
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
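        # A shared counter and Numba typed lists record how often and in which order the
        # prep, group_prep, segment_prep and order callbacks are invoked.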
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def group_prep_func_nb(gc, call_i, group_lst):
call_i[0] += 1
group_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,)
)
assert call_i[0] == 28
assert list(sim_lst) == [1]
assert list(group_lst) == [2, 18]
assert list(segment_lst) == [3, 6, 9, 12, 15, 19, 21, 23, 25, 27]
assert list(order_lst) == [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 20, 22, 24, 26, 28]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, True],
[False, False],
[False, True],
[False, False],
[False, True],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask
)
assert call_i[0] == 8
assert list(sim_lst) == [1]
assert list(group_lst) == [2]
assert list(segment_lst) == [3, 5, 7]
assert list(order_lst) == [4, 6, 8]
def test_func_calls_row_wise(self):
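        # Same call-tracking idea as above, but in row-wise mode rows are processed first,
        # so the recorded call order interleaves groups within each row.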
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def row_prep_func_nb(gc, call_i, row_lst):
call_i[0] += 1
row_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
row_wise=True
)
assert call_i[0] == 31
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 8, 14, 20, 26]
assert list(segment_lst) == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]
assert list(order_lst) == [4, 5, 7, 10, 11, 13, 16, 17, 19, 22, 23, 25, 28, 29, 31]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask,
row_wise=True
)
assert call_i[0] == 14
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 5, 9]
assert list(segment_lst) == [3, 6, 10, 13]
assert list(order_lst) == [4, 7, 8, 11, 12, 14]
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #
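# Shared fixtures for the Portfolio tests below: a price frame with NaNs to exercise
# missing-data handling, and three variants of the same portfolio - ungrouped,
# grouped without cash sharing, and grouped with cash sharing.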
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'all']
group_by = pd.Index(['first', 'first', 'second'], name='group')
portfolio = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D'
) # independent
portfolio_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D'
) # grouped
portfolio_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D'
) # shared
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
def test_orders(self):
record_arrays_close(
portfolio.orders.values,
np.array([
(0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
(6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
(8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
portfolio.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, np.nan, 100.0, 1.0, 0, 0, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.0, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 1, 0, 0, 100.0, 0.0, 2.0, 100.0, 0.1, 0, 0, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.69598, 0.1, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 2, 0, 0, 99.69598, 0.1, 3.0, 99.99598, -1.0, 0, 0, 3.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 3, 0, 0, 99.89001, 0.0, 4.0, 99.89001, -0.1, 0, 0, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 4, 0, 0, 99.89001, 0.0, 5.0, 99.89001, 1.0, 0, 0, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 94.68951, 1.0, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 0, 1, 1, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 1, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.8801, -1.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 2.0, 98.8801, 0.1, 0, 1, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 2, 1, 1, 100.97612, -1.1, np.nan, np.nan, -1.0, 0, 1, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 3, 1, 1, 100.97612, -1.1, 4.0, 96.57611999999999, -0.1, 0, 1, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.46808, -1.0, 0.1, 4.04, 0.10404000000000001, 0, 0, -1, 5),
(9, 4, 1, 1, 100.46808, -1.0, 5.0, 95.46808, 1.0, 0, 1, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 105.26858, -2.0, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 0, 2, 2, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 2, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.8799, 1.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 1, 2, 2, 98.8799, 1.0, 2.0, 100.8799, 0.1, 0, 2, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.57588000000001, 1.1, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 3.0, 101.87588000000001, -1.0, 0, 2, 3.0,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True, 101.41618000000001,
0.10000000000000009, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 4.0, 101.81618000000002,
-0.1, 0, 2, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True,
101.70822000000001, 0.0, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 4, 2, 2, 101.70822000000001, 0.0, np.nan, 101.70822000000001, 1.0, 0, 2, np.nan, 0.01, 0.1, 0.01,
1e-08, np.inf, 0.0, True, False, True, 101.70822000000001, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.logs.count(),
result
)
def test_trades(self):
record_arrays_close(
portfolio.trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.trades.count(),
result
)
def test_positions(self):
record_arrays_close(
portfolio.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
], dtype=position_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_fill_close(self):
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
def test_share_flow(self):
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.share_flow(),
result
)
def test_shares(self):
pd.testing.assert_frame_equal(
portfolio.shares(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.shares(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.shares(),
result
)
def test_pos_mask(self):
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(),
result
)
def test_pos_coverage(self):
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('pos_coverage')
)
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('pos_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
portfolio.cash_flow(short_cash=False),
pd.DataFrame(
np.array([
[0., -1.0999, -1.1201],
[-0.30402, -0.29998, -0.30402],
[0.19403, 0., 2.8403],
[0., 0.29996, 0.29204],
[-5.2005, -5.0995, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
portfolio.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
portfolio_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
portfolio.cash(short_cash=False),
pd.DataFrame(
np.array([
[100., 98.9001, 98.8799],
[99.69598, 98.60012, 98.57588],
[99.89001, 98.60012, 101.41618],
[99.89001, 98.90008, 101.70822],
[94.68951, 93.80058, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(),
result
)
def test_holding_value(self):
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., np.nan, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., np.nan, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[np.nan, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 0.01001, 0.],
[0., 0.02182537, 0.],
[0., np.nan, 0.],
[0., 0.03887266, 0.],
[0., 0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -0.01021449, 0.01001202],
[0.00200208, -0.02282155, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.0421496, 0.],
[0.05015573, -0.11933092, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.00505305, 0.01001202],
[0.00100052, -0.01120162, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.02052334, 0.],
[0.02503887, -0.05440679, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005003, 0.01001202],
[-0.01006684, 0.02183062],
[np.nan, 0.00294938],
[-0.02037095, 0.],
[-0.02564654, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0., -0.01001, 0.01001202],
[0.00200208, -0.02182537, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.03887266, 0.],
[0.05015573, -0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0050025, 0.01001202],
[0.00100052, -0.01095617, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.01971414, 0.],
[0.02503887, -0.04906757, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00495344, 0.01001202],
[-0.00984861, 0.02183062],
[np.nan, 0.00294938],
[-0.01957348, 0.],
[-0.02323332, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, np.nan, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, np.nan, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[np.nan, np.nan, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[np.nan, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[np.nan, np.nan, 9.33060570e-03],
[0.0, np.nan, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[np.nan, 9.33060570e-03],
[np.nan, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(),
result
)
def test_active_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, np.nan, 0.42740909],
[0., np.nan, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[np.nan, 0.42740909],
[np.nan, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.active_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(),
result
)
def test_market_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(),
result
)
def test_market_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns= | pd.Index(['first', 'second'], dtype='object', name='group') | pandas.Index |
from typing import Dict, Iterable, Tuple, Union
from pathlib import Path
import lmfit
import pandas as pd
def get_data_path(sub_path: str) -> Path:
"""
Returns the Path object of a path in data and
creates the parent folders if they don't exist already
Parameters
----------
sub_path : str
subpath in data directory
Returns
-------
Path
Path to a file in data
"""
data_base_path = Path(__file__).parent.parent / "data"
data_path = data_base_path / sub_path
if data_path.suffixes == []:
data_path.mkdir(parents=True, exist_ok=True)
else:
data_path.parent.mkdir(parents=True, exist_ok=True)
return data_path
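# Illustrative usage sketch (added for clarity, not part of the original module):
# the file name "covid19.csv" is a hypothetical placeholder, while the
# "funkeinteraktiv_de" sub folder matches the one used further below.
def _example_get_data_path() -> Path:
    """Hedged sketch: resolve a CSV path inside the data directory."""
    # because the sub path has a file suffix, only its parent directory
    # data/funkeinteraktiv_de is created if it does not exist yet
    return get_data_path("funkeinteraktiv_de/covid19.csv")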
def get_infectious(covid_df: pd.DataFrame) -> None:
"""
Calculates the number of still infectious people.
This function uses the mutability of DataFrames,
which is why it doesn't have a return value
Parameters
----------
covid_df : pd.DataFrame
Dataframe containing all covid19 data
"""
if covid_df.columns.isin(["recovered"]).any():
recovered = covid_df.recovered.fillna(0)
else:
recovered = 0
deaths = covid_df.deaths.fillna(0)
covid_df["still_infectious"] = covid_df.confirmed - recovered - deaths
def calc_country_total(covid_df: pd.DataFrame) -> pd.DataFrame:
"""
Calculates the total for each country from the covid_df,
where only data for regions was present before
Parameters
----------
covid_df : pd.DataFrame
covid19 DataFrame (needs to be in uniform style)
Returns
-------
pd.DataFrame
Dataframe containing the totals for countries, which before only had
their regions listed.
"""
total_df = pd.DataFrame()
for (parent, date), group in covid_df.groupby(["parent_region", "date"]):
if parent != "#Global":
country_total = group.sum()
country_total.parent_region = "#Global"
country_total.region = f"{parent} (total)"
country_total["date"] = date
total_df = total_df.append(country_total, ignore_index=True)
return total_df
def calc_worldwide_total(
covid_df: pd.DataFrame, parent_region_label="parent_region", region_label="region"
) -> pd.DataFrame:
"""
Calculates the worldwide total.
Parameters
----------
covid_df : pd.DataFrame
covid19 DataFrame (needs to be in uniform style)
parent_region_label: str
name of the parent_region column
region_label: str
name of the region column
Returns
-------
pd.DataFrame
Dataframe containing the worldwide totals.
"""
global_country_df = covid_df[covid_df[parent_region_label] == "#Global"]
worldwide_total_df = global_country_df.groupby(["date"]).sum()
worldwide_total_df[parent_region_label] = "#Global"
worldwide_total_df[region_label] = "#Worldwide"
worldwide_total_df.reset_index(inplace=True)
return worldwide_total_df
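# Illustrative usage sketch (not part of the original module): build the
# per-country and worldwide totals and attach them to a uniform-style
# covid19 DataFrame.
def _example_add_totals(covid_df: pd.DataFrame) -> pd.DataFrame:
    """Hedged sketch: return covid_df extended by country and worldwide totals."""
    country_totals = calc_country_total(covid_df)      # "<country> (total)" rows
    worldwide_total = calc_worldwide_total(covid_df)   # "#Worldwide" rows
    return pd.concat([covid_df, country_totals, worldwide_total], ignore_index=True)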
def get_shifted_dfs(
covid_df: pd.DataFrame,
time_shift: Union[int, float] = 1,
time_shift_unit: str = "D",
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Helper function to shift the date of the covid data by a given time
and obtain DataFrames which can be used to calculate the growth and growth rate.
Parameters
----------
covid_df : pd.DataFrame
Full covid19 data from a data_source
time_shift : [int,float], optional
value by which the time should be shifted, by default 1
time_shift_unit : str, optional
unit of the time shift , by default "D"
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
shifted and unshifted covid19 data, with date, parent_region and region as index
"""
unshifted_data = covid_df.set_index(["date", "parent_region", "region"])
shifted_data = covid_df.copy()
shifted_data.date = shifted_data.date + pd.Timedelta(
time_shift, unit=time_shift_unit
)
shifted_data = shifted_data.set_index(["date", "parent_region", "region"])
return unshifted_data, shifted_data
def get_daily_growth(covid_df: pd.DataFrame) -> pd.DataFrame:
"""
Calculates the daily growth values
Parameters
----------
covid_df : pd.DataFrame
Full covid19 data from a data_source
Returns
-------
pd.DataFrame
covid19 DataFrame, with daily growth values instead of totals.
"""
unshifted_data, shifted_data = get_shifted_dfs(covid_df)
daily_increase = unshifted_data - shifted_data
return daily_increase.dropna().reset_index()
def get_growth_rate(covid_df: pd.DataFrame) -> pd.DataFrame:
"""
Calculates the growth rate values
Parameters
----------
covid_df : pd.DataFrame
Full covid19 data from a data_source
Returns
-------
pd.DataFrame
covid19 DataFrame, with growth rate values instead of totals.
"""
daily_growth = get_daily_growth(covid_df)
unshifted_data, shifted_data = get_shifted_dfs(daily_growth)
# the '+1' is needed to prevent zero division
growth_rate = unshifted_data / (shifted_data + 1)
return growth_rate.dropna().reset_index()
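# Illustrative usage sketch (not part of the original module): the growth
# pipeline described above, going from cumulative totals to daily growth and
# growth-rate values.
def _example_growth_pipeline(covid_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Hedged sketch: compute daily growth and growth-rate frames."""
    daily_growth_df = get_daily_growth(covid_df)   # cumulative totals -> daily increases
    growth_rate_df = get_growth_rate(covid_df)     # daily increases -> growth rates
    return daily_growth_df, growth_rate_df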
def params_to_dict(params: lmfit.Parameters, kind: str = "values") -> dict:
"""
Converts fit result parameters to a dict
Parameters
----------
params : lmfit.Parameters
fit result parameters
kind : str, optional
["values", "stderr"], by default "values"
Returns
-------
dict
Dict containing the parameter names as keys and the values or stderr as values
"""
result_dict = {}
for name, param in params.items():
if kind == "values":
result_dict[name] = param.value
elif kind == "stderr":
result_dict[name] = param.stderr
return result_dict
def params_to_df(
params: lmfit.Parameters, param_inverted_stderr: Iterable[str] = []
) -> pd.DataFrame:
"""
Returns a DataFrame with the values and stderr of the params
Parameters
----------
params : lmfit.Parameters
fit result parameters
param_inverted_stderr : Iterable[str], optional
iterable of parameter names whose stderr should be inverted
to calculate the extrema, by default []
Returns
-------
pd.DataFrame
DataFrame with columns "values" and "stderr", parameter names as index
"""
param_vals = params_to_dict(params)
param_stderrs = params_to_dict(params, kind="stderr")
param_df = pd.DataFrame({"values": param_vals, "stderr": param_stderrs})
param_df.loc[param_inverted_stderr, "stderr"] = -param_df.loc[
param_inverted_stderr, "stderr"
]
return param_df
def get_fit_param_results_row(
region: str,
parent_region: str,
subset: str,
fit_result: Dict[str, Union[lmfit.model.ModelResult, pd.DataFrame]],
) -> pd.DataFrame:
"""
Returns a row containing all fitted parameters for a region,
which can then be combined into a fit param results dataframe
Parameters
----------
region : str
Value of the fitted region
parent_region : str
Parent region of the fitted region
subset : str
Subset of the region's data which was fitted
fit_result : Dict[str, Union[lmfit.model.ModelResult, pd.DataFrame]]
Result of fit_data_model or its implementation
Returns
-------
pd.DataFrame
Row of fit param results dataframe, for the fitted region
See Also
--------
covid19_data_analyzer.data_functions.analysis.factory_functions.fit_data_model
"""
flat_params_df = pd.DataFrame(
[{"region": region, "parent_region": parent_region, "subset": subset}]
)
params_df = params_to_df(fit_result["model_result"].params)
transformed_df = (
params_df.reset_index()
.melt(id_vars="index", var_name="kind")
.sort_values("index")
)
new_index = transformed_df["index"] + " " + transformed_df["kind"]
transformed_df = (
transformed_df.set_index(new_index).drop(["index", "kind"], axis=1).T
)
flat_params_df = flat_params_df.join(transformed_df.reset_index(drop=True))
return flat_params_df
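# Illustrative usage sketch (not part of the original module): combining the
# per-region rows into one fit param results DataFrame.  `fit_results` is an
# assumed mapping of region name to the fit_result dict described above, and
# the subset label "confirmed" is an assumed example value.
def _example_fit_param_results(fit_results: Dict[str, dict]) -> pd.DataFrame:
    """Hedged sketch: concatenate per-region fit parameter rows."""
    rows = [
        get_fit_param_results_row(region, "#Global", "confirmed", fit_result)
        for region, fit_result in fit_results.items()
    ]
    return pd.concat(rows, ignore_index=True)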
def translate_funkeinteraktiv_fit_data():
"""
Helper function to prevent fitting overhead,
which would be caused if the same dataset were fitted twice,
once with German and once with English region names.
Rather than fitting twice, this function simply translates
the German region names to the English ones, both of which were extracted by
'get_funkeinteraktiv_data'.
"""
source_dir = get_data_path("funkeinteraktiv_de")
target_dir = get_data_path("funkeinteraktiv_en")
translate_path = source_dir / "translation_table.csv"
translate_df = | pd.read_csv(translate_path) | pandas.read_csv |
#
# Copyright (c) 2015 - 2022, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
GEOPM IO - Helper module for parsing/processing report and trace files.
"""
from __future__ import absolute_import
from __future__ import division
from builtins import str
from collections import OrderedDict
import os
import json
import re
import pandas
import numpy
import glob
import sys
import subprocess
import psutil
import copy
import yaml
import io
import hashlib
from distutils.spawn import find_executable
from natsort import natsorted
from . import __version__
from . import update_report
try:
_, os.environ['COLUMNS'] = subprocess.check_output(['stty', 'size']).decode().split()
except subprocess.CalledProcessError:
os.environ['COLUMNS'] = "200"
pandas.set_option('display.width', int(os.environ['COLUMNS']))
pandas.set_option('display.max_colwidth', 80)
pandas.set_option('max_columns', 100)
class AppOutput(object):
"""The container class for all trace related data.
This class holds the relevant objects for parsing and indexing all
data that is output from GEOPM. This object can be created with a
trace glob string that will be used to search dir_name for the
relevant files. If files are found, their data will be parsed into
objects for easy data access. Additionally, a Pandas DataFrame is
constructed containing all of the trace data. These DataFrames are
indexed based on the version of
GEOPM found in the files, the profile name, agent name, and the number
of times that particular configuration has been seen by the parser
(i.e. experiment iteration).
Attributes:
trace_glob: The string pattern to use to search for trace files.
dir_name: The directory path to use when searching for files.
verbose: A bool to control whether verbose output is printed to stdout.
"""
def __init__(self, traces=None, dir_name='.', verbose=False, do_cache=True):
self._traces = {}
self._traces_df = pandas.DataFrame()
self._all_paths = []
self._index_tracker = IndexTracker()
self._node_names = None
self._region_names = None
if traces:
if type(traces) is list:
trace_paths = [os.path.join(dir_name, path) for path in traces]
else:
trace_glob = os.path.join(dir_name, traces)
try:
trace_paths = glob.glob(trace_glob)
except TypeError:
raise TypeError('<geopm> geopmpy.io: AppOutput: traces must be a list of paths or a glob pattern')
trace_paths = natsorted(trace_paths)
if len(trace_paths) == 0:
raise RuntimeError('<geopm> geopmpy.io: No trace files found with pattern {}.'.format(trace_glob))
self._all_paths.extend(trace_paths)
self._index_tracker.reset()
if do_cache:
# unique cache name based on trace files in this list
paths_str = str(trace_paths)
try:
h5_id = hashlib.shake_256(paths_str.encode()).hexdigest(14)
except AttributeError:
h5_id = hash(paths_str)
trace_h5_name = 'trace_{}.h5'.format(h5_id)
self._all_paths.append(trace_h5_name)
# check if cache is older than traces
if os.path.exists(trace_h5_name):
cache_mod_time = os.path.getmtime(trace_h5_name)
regen_cache = False
for trace_file in trace_paths:
mod_time = os.path.getmtime(trace_file)
if mod_time > cache_mod_time:
regen_cache = True
if regen_cache:
os.remove(trace_h5_name)
try:
self._traces_df = pandas.read_hdf(trace_h5_name, 'trace')
if verbose:
sys.stdout.write('Loaded traces from {}.\n'.format(trace_h5_name))
except IOError as err:
sys.stderr.write('Warning: <geopm> geopmpy.io: Trace HDF5 file not detected or older than traces. Data will be saved to {}.\n'
.format(trace_h5_name))
self.parse_traces(trace_paths, verbose)
# Cache traces dataframe
try:
if verbose:
sys.stdout.write('Generating HDF5 files... ')
self._traces_df.to_hdf(trace_h5_name, 'trace')
except ImportError as error:
sys.stderr.write('Warning: <geopm> geopmpy.io: Unable to write HDF5 file: {}\n'.format(str(error)))
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
else:
self.parse_traces(trace_paths, verbose)
def parse_traces(self, trace_paths, verbose):
traces_df_list = []
fileno = 1
filesize = 0
for tp in trace_paths: # Get size of all trace files
filesize += os.stat(tp).st_size
# Abort if traces are too large
avail_mem = psutil.virtual_memory().available
if filesize > avail_mem // 2:
sys.stderr.write('Warning: <geopm> geopmpy.io: Total size of traces is greater than 50% of available memory. Parsing traces will be skipped.\n')
return
filesize = '{}MiB'.format(filesize // 1024 // 1024)
for tp in trace_paths:
if verbose:
sys.stdout.write('\rParsing trace file {} of {} ({})... '.format(fileno, len(trace_paths), filesize))
sys.stdout.flush()
fileno += 1
tt = Trace(tp)
self.add_trace_df(tt, traces_df_list) # Handles multiple traces per node
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
if verbose:
sys.stdout.write('Creating combined traces DF... ')
sys.stdout.flush()
self._traces_df = pandas.concat(traces_df_list)
self._traces_df = self._traces_df.sort_index(ascending=True)
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
def remove_files(self):
"""Deletes all files currently tracked by this object."""
for ff in self._all_paths:
try:
os.remove(ff)
except OSError:
pass
def add_trace_df(self, tt, traces_df_list):
"""Adds a trace DataFrame to the tracking list.
The report tracking list is used to create the combined
DataFrame once all reports are parsed.
Args:
tt: The Trace object used to extract the Trace DataFrame.
This DataFrame will be indexed and added to the
tracking list.
"""
tdf = tt.get_df() # TODO: this needs numeric cols optimization
tdf = tdf.set_index(self._index_tracker.get_multiindex(tt))
traces_df_list.append(tdf)
def get_trace_data(self, node_name=None):
idx = pandas.IndexSlice
df = self._traces_df
if node_name is not None:
df = df.loc[idx[:, :, :, :, node_name, :, :], ]
return df
def get_trace_df(self):
"""Getter for the combined DataFrame of all trace files parsed.
This DataFrame contains all data parsed, and has a complex
MultiIndex for accessing the unique data from each individual
trace. For more information on this index, see the
IndexTracker docstring.
Returns:
pandas.DataFrame: Contains all parsed data.
"""
return self._traces_df
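# Illustrative usage sketch (not part of the original module): parse every
# trace file matching a glob pattern and slice out the data for one node.
# The glob pattern and node name below are hypothetical placeholders.
def _example_app_output():
    app_output = AppOutput(traces='*trace-*', dir_name='.', verbose=True)
    trace_df = app_output.get_trace_df()                  # full MultiIndexed frame
    node_df = app_output.get_trace_data(node_name='mr-fusion2')
    return trace_df, node_df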
class IndexTracker(object):
"""Tracks and uniquely identifies experiment configurations for
DataFrame indexing.
This object's purpose is to examine parsed data for reports or
traces and determine if a particular experiment configuration has
already been tracked. A user may run the same configuration
repeatedly in order to prove that results are repeatable and are
not outliers. Since the same configuration is used many times, it
must be tracked and counted to ensure that the unique data for
each run can be extracted later.
The parsed data is used to extract the following fields to build
the tracking index tuple:
(<GEOPM_VERSION>, <START_TIME>, <PROFILE_NAME>, <AGENT_NAME>, <NODE_NAME>)
If the tuple is not contained in the _run_outputs dict, it is
inserted with a value of 1. The value is incremented if the tuple
is currently in the _run_outputs dict. This value is used to
uniquely identify a particular set of parsed data when the
MultiIndex is created.
"""
def __init__(self):
self._run_outputs = {}
def _check_increment(self, run_output):
"""Extracts the index tuple from the parsed data and tracks it.
Checks to see if the current run_output has been seen before.
If so, the count is incremented. Otherwise it is stored as 1.
Args:
run_output: The Trace object to be tracked.
"""
index = (run_output.get_version(), run_output.get_start_time(),
os.path.basename(run_output.get_profile_name()),
run_output.get_agent(), run_output.get_node_name())
if index not in self._run_outputs:
self._run_outputs[index] = 1
else:
self._run_outputs[index] += 1
def _get_base_index(self, run_output):
"""Constructs the actual index tuple to be used to construct a
uniquely-identifying MultiIndex for this data.
Takes a run_output as input, and returns the unique tuple to
identify this run_output in the DataFrame. Note that this
method appends the current experiment iteration to the end of
the returned tuple. E.g.:
>>> self._index_tracker.get_base_index(rr)
('0.1.1+dev365gfcda929', 'geopm_test_integration', 170,
'static_policy', 'power_balancing', 'mr-fusion2', 1)
Args:
run_output: The Trace object to produce an index tuple for.
Returns:
Tuple: This will contain all of the index fields needed to uniquely identify this data (including the
count of how many times this experiment has been seen).
"""
key = (run_output.get_version(), run_output.get_start_time(),
os.path.basename(run_output.get_profile_name()),
run_output.get_agent(), run_output.get_node_name())
return key + (self._run_outputs[key], )
def get_multiindex(self, run_output):
"""Returns a MultiIndex from this run_output. Used in DataFrame construction.
This will add the current run_output to the list of tracked
data, and return a unique MultiIndex tuple to identify this
data in a DataFrame.
For Trace objects, the integer index of the DataFrame is
appended to the tuple.
Args:
run_output: The Trace object to produce an index
tuple for.
Returns:
pandas.MultiIndex: The unique index to identify this data object.
"""
self._check_increment(run_output)
itl = []
index_names = ['version', 'start_time', 'name', 'agent', 'node_name', 'iteration']
# Trace file index
index_names.append('index')
for ii in range(len(run_output.get_df())): # Append the integer index to the DataFrame index
itl.append(self._get_base_index(run_output) + (ii, ))
mi = pandas.MultiIndex.from_tuples(itl, names=index_names)
return mi
def reset(self):
"""Clears the internal tracking dictionary.
Since only one type of data (reports OR traces) can be tracked
at once, this is necessary to reset the object's state so a
new type of data can be tracked.
"""
self._run_outputs = {}
class Trace(object):
"""Creates a pandas DataFrame comprised of the trace file data.
This object will parse both the header and the CSV data in a trace
file. The header identifies the uniquely-identifying configuration
for this file which is used for later indexing purposes.
Even though __getattr__() and __getitem__() allow this object to
effectively be treated like a DataFrame, you must use get_df() if
you're building a list of DataFrames to pass to pandas.concat().
Using the raw object in a list and calling concat will cause an
error.
Attributes:
trace_path: The path to the trace file to parse.
"""
def __init__(self, trace_path, use_agent=True):
self._path = trace_path
old_headers = {'time': 'TIME',
'epoch_count': 'EPOCH_COUNT',
'region_hash': 'REGION_HASH',
'region_hint': 'REGION_HINT',
'region_progress': 'REGION_PROGRESS',
'region_count': 'REGION_COUNT',
'region_runtime': 'REGION_RUNTIME',
'energy_package': 'ENERGY_PACKAGE',
'energy_dram': 'ENERGY_DRAM',
'power_package': 'POWER_PACKAGE',
'power_dram': 'POWER_DRAM',
'frequency': 'FREQUENCY',
'cycles_thread': 'CYCLES_THREAD',
'cycles_reference': 'CYCLES_REFERENCE',
'temperature_core': 'TEMPERATURE_CORE'}
old_balancer_headers = {'policy_power_cap': 'POLICY_POWER_CAP',
'policy_step_count': 'POLICY_STEP_COUNT',
'policy_max_epoch_runtime': 'POLICY_MAX_EPOCH_RUNTIME',
'policy_power_slack': 'POLICY_POWER_SLACK',
'epoch_runtime': 'EPOCH_RUNTIME',
'power_limit': 'POWER_LIMIT',
'enforced_power_limit': 'ENFORCED_POWER_LIMIT'}
old_headers.update(old_balancer_headers)
old_governor_headers = {'power_budget': 'POWER_BUDGET'}
old_headers.update(old_governor_headers)
# Need to determine how many lines are in the header
# explicitly. We cannot use '#' as a comment character since
# it occurs in raw MSR signal names.
skiprows = 0
with open(trace_path) as fid:
for ll in fid:
if ll.startswith('#'):
skiprows += 1
else:
break
column_headers = pandas.read_csv(trace_path, sep='|', skiprows=skiprows, nrows=0, encoding='utf-8').columns.tolist()
original_headers = copy.deepcopy(column_headers)
column_headers = [old_headers.get(ii, ii) for ii in column_headers]
if column_headers != original_headers:
sys.stderr.write('Warning: <geopm> geopmpy.io: Old trace file format detected. Old column headers will be forced ' \
'to UPPERCASE.\n')
# region_hash and region_hint must be a string for pretty printing pandas DataFrames
# You can force them to int64 by setting up a converter function then passing the hex string through it
# with the read_csv call, but the number will be displayed as an integer from then on. You'd have to convert
# it back to a hex string to compare it with the data in the reports.
self._df = pandas.read_csv(trace_path, sep='|', skiprows=skiprows, header=0, names=column_headers, encoding='utf-8',
dtype={'REGION_HASH': 'unicode', 'REGION_HINT': 'unicode'})
self._df.columns = list(map(str.strip, self._df[:0])) # Strip whitespace from column names
self._df['REGION_HASH'] = self._df['REGION_HASH'].astype('unicode').map(str.strip) # Strip whitespace from region hashes
self._df['REGION_HINT'] = self._df['REGION_HINT'].astype('unicode').map(str.strip) # Strip whitespace from region hints
self._version = None
self._start_time = None
self._profile_name = None
self._agent = None
self._node_name = None
self._use_agent = use_agent
self._parse_header(trace_path)
def __repr__(self):
return self._df.__repr__()
def __str__(self):
return self.__repr__()
def __getattr__(self, attr):
"""Pass through attribute requests to the underlying DataFrame.
This allows for Trace objects to be treated like DataFrames
for analysis. You can do things like:
>>> tt = geopmpy.io.Trace('170-4-balanced-minife-trace-mr-fusion5')
>>> tt.keys()
Index([u'region_hash', u'region_hint', u'seconds', u'pkg_energy-0', u'dram_energy-0',...
"""
return getattr(self._df, attr)
def __getitem__(self, key):
"""Pass through item requests to the underlying DataFrame.
This allows standard DataFrame slicing operations to take place.
@todo, update
>>> tt[['region_hash', 'region_hint', 'time', 'energy_package', 'energy_dram']][:5]
region_hash region_hint time energy_package-0 energy_dram-0
0 2305843009213693952 0.662906 106012.363770 25631.015519
1 2305843009213693952 0.667854 106012.873718 25631.045777
2 2305843009213693952 0.672882 106013.411621 25631.075807
3 2305843009213693952 0.677869 106013.998108 25631.105882
4 2305843009213693952 0.682849 106014.621704 25631.136186
"""
return self._df.__getitem__(key)
def _parse_header(self, trace_path):
"""Parses the configuration header out of the top of the trace file.
Args:
trace_path: The path to the trace file to parse.
"""
done = False
out = []
with open(trace_path) as fid:
while not done:
ll = fid.readline()
if ll.startswith('#'):
out.append(ll[1:])
else:
done = True
try:
yaml_fd = io.StringIO(u''.join(out))
dd = yaml.load(yaml_fd, Loader=yaml.SafeLoader)
except yaml.parser.ParserError:
out.insert(0, '{')
out.append('}')
json_str = ''.join(out)
dd = json.loads(json_str)
try:
self._version = dd['geopm_version']
self._start_time = dd['start_time']
self._profile_name = dd['profile_name']
if self._use_agent:
self._agent = dd['agent']
self._node_name = dd['node_name']
except KeyError:
raise SyntaxError('<geopm> geopmpy.io: Trace file header could not be parsed!')
def get_df(self):
return self._df
def get_version(self):
return self._version
def get_start_time(self):
return self._start_time
def get_profile_name(self):
return self._profile_name
def get_agent(self):
return self._agent
def get_node_name(self):
return self._node_name
@staticmethod
def diff_df(trace_df, column_regex, epoch=True):
"""Diff the DataFrame.
Since the counters in the trace files are monotonically
increasing, a diff must be performed to extract the useful
data.
Args:
trace_df: The MultiIndexed DataFrame created by the
AppOutput class.
column_regex: A string representing the regex search
pattern for the column names to diff.
epoch: A flag to set whether or not to focus solely on
epoch regions.
Returns:
pandas.DataFrame: With the diffed columns specified by
'column_regex', and an 'elapsed_time'
column.
Todo:
* Should I drop everything before the first epoch if
'epoch' is false?
"""
# drop_duplicates() is a workaround for #662. Duplicate data
# rows are showing up in the trace for unmarked.
tmp_df = trace_df.drop_duplicates()
filtered_df = tmp_df.filter(regex=column_regex).copy()
filtered_df['elapsed_time'] = tmp_df['time']
if epoch:
filtered_df['epoch_count'] = tmp_df['epoch_count']
filtered_df = filtered_df.diff()
# The following drops all 0's and the negative sample when traversing between 2 trace files.
# If the epoch_count column is included, this will also drop rows occurring mid-epoch.
filtered_df = filtered_df.loc[(filtered_df > 0).all(axis=1)]
# Reset 'index' to be 0 to the length of the unique trace files
traces_list = []
for (version, start_time, name, agent, node_name, iteration), df in \
filtered_df.groupby(level=['version', 'start_time', 'name', 'agent', 'node_name', 'iteration']):
df = df.reset_index(level='index')
df['index'] = pandas.Series(numpy.arange(len(df)), index=df.index)
df = df.set_index('index', append=True)
traces_list.append(df)
return pandas.concat(traces_list)
@staticmethod
def get_median_df(trace_df, column_regex, config):
"""Extract the median experiment iteration.
This logic calculates the sum of elapsed times for all of the
experiment iterations for all nodes in that iteration. It
then extracts the DataFrame for the iteration that is closest
to the median. For input DataFrames with a single iteration,
the single iteration is returned.
Args:
trace_df: The MultiIndexed DataFrame created by the
AppOutput class.
column_regex: A string representing the regex search
pattern for the column names to diff.
config: The TraceConfig object being used presently.
Returns:
pandas.DataFrame: Containing a single experiment iteration.
"""
diffed_trace_df = Trace.diff_df(trace_df, column_regex, config.epoch_only)
idx = pandas.IndexSlice
et_sums = diffed_trace_df.groupby(level=['iteration'])['elapsed_time'].sum()
median_index = (et_sums - et_sums.median()).abs().sort_values().index[0]
median_df = diffed_trace_df.loc[idx[:, :, :, :, :, median_index], ]
if config.verbose:
median_df_index = []
median_df_index.append(median_df.index.get_level_values('version').unique()[0])
median_df_index.append(median_df.index.get_level_values('start_time').unique()[0])
median_df_index.append(median_df.index.get_level_values('name').unique()[0])
median_df_index.append(median_df.index.get_level_values('agent').unique()[0])
median_df_index.append(median_df.index.get_level_values('iteration').unique()[0])
sys.stdout.write('Median DF index = ({})...\n'.format(' '.join(str(s) for s in median_df_index)))
sys.stdout.flush()
return median_df
class BenchConf(object):
"""The application configuration parameters.
Used to hold the config data for the integration test application.
This application allows for varying combinations of regions
(compute, IO, or network bound), complexity, desired execution
count, and amount of imbalance between nodes during execution.
Attributes:
path: The output path for this configuration file.
"""
def __init__(self, path):
self._path = path
self._loop_count = 1
self._region = []
self._big_o = []
self._hostname = []
self._imbalance = []
def __repr__(self):
template = """\
path : {path}
regions : {regions}
big-o : {big_o}
loop count: {loops}
hostnames : {hosts}
imbalance : {imbalance}
"""
return template.format(path=self._path,
regions=self._region,
big_o=self._big_o,
loops=self._loop_count,
hosts=self._hostname,
imbalance=self._imbalance)
def __str__(self):
return self.__repr__()
def set_loop_count(self, loop_count):
self._loop_count = loop_count
def append_region(self, name, big_o):
"""Appends a region to the internal list.
Args:
name: The string representation of the region.
big_o: The desired complexity of the region. This
affects compute, IO, or network complexity
depending on the type of region requested.
"""
self._region.append(name)
self._big_o.append(big_o)
def append_imbalance(self, hostname, imbalance):
"""Appends imbalance to the config for a particular node.
Args:
hostname: The name of the node.
imbalance: The amount of imbalance to apply to the node.
This is specified by a float in the range
[0,1]. For example, specifying a value of 0.25
means that this node will spend 25% more time
executing the work than a node would by
default. Nodes not specified with imbalance
configurations will perform normally.
"""
self._hostname.append(hostname)
self._imbalance.append(imbalance)
def get_path(self):
return self._path
def write(self):
"""Write the current config to a file."""
obj = {'loop-count': self._loop_count,
'region': self._region,
'big-o': self._big_o}
if (self._imbalance and self._hostname):
obj['imbalance'] = self._imbalance
obj['hostname'] = self._hostname
with open(self._path, 'w') as fid:
json.dump(obj, fid)
def get_exec_path(self):
# Using libtool causes sporadic issues with the Intel
# toolchain.
result = 'geopmbench'
path = find_executable(result)
source_dir = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.realpath(__file__))))
source_bin = os.path.join(source_dir, '.libs', 'geopmbench')
if not path:
result = source_bin
else:
with open(path, 'rb') as fid:
buffer = fid.read(4096)
if b'Generated by libtool' in buffer:
result = source_bin
return result
def get_exec_args(self):
return [self._path]
class RawReport(object):
def __init__(self, path):
update_report.update_report(path)
# Fix issue with python yaml module where it is confused
# about floating point numbers of the form "1e+10" where
# the decimal point is missing.
# See PR: https://github.com/yaml/pyyaml/pull/174
# for upstream fix to pyyaml
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\.[0-9_]+(?:[eE][-+]?[0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
with open(path) as fid:
self._raw_dict = yaml.load(fid, Loader=loader)
def raw_report(self):
return copy.deepcopy(self._raw_dict)
def dump_json(self, path):
jdata = json.dumps(self._raw_dict)
with open(path, 'w') as fid:
fid.write(jdata)
def meta_data(self):
result = dict()
all_keys = ['GEOPM Version',
'Start Time',
'Profile',
'Agent',
'Policy']
for kk in all_keys:
result[kk] = self._raw_dict[kk]
return result
def figure_of_merit(self):
result = None
try:
result = copy.deepcopy(self._raw_dict['Figure of Merit'])
except:
pass
return result
def total_runtime(self):
result = None
try:
result = copy.deepcopy(self._raw_dict['Total Runtime'])
except:
pass
return result
def host_names(self):
return list(self._raw_dict['Hosts'].keys())
def region_names(self, host_name):
return [rr['region'] for rr in self._raw_dict['Hosts'][host_name]['Regions']]
def raw_region(self, host_name, region_name):
result = None
for rr in self._raw_dict['Hosts'][host_name]['Regions']:
if rr['region'] == region_name:
result = copy.deepcopy(rr)
if not result:
raise RuntimeError('region name: {} not found'.format(region_name))
return result
def raw_unmarked(self, host_name):
host_data = self._raw_dict["Hosts"][host_name]
key = 'Unmarked Totals'
return copy.deepcopy(host_data[key])
def raw_epoch(self, host_name):
host_data = self._raw_dict["Hosts"][host_name]
key = 'Epoch Totals'
return copy.deepcopy(host_data[key])
def raw_totals(self, host_name):
host_data = self._raw_dict["Hosts"][host_name]
key = 'Application Totals'
return copy.deepcopy(host_data[key])
def agent_host_additions(self, host_name):
# other keys that are not region, epoch, or app
# total i.e. from Agent::report_host()
host_data = self._raw_dict[host_name]
result = {}
for key, val in host_data.items():
if key not in ['Epoch Totals', 'Application Totals'] and not key.startswith('Region '):
result[key] = copy.deepcopy(val)
return result
def get_field(self, raw_data, key, units=''):
matches = [(len(kk), kk) for kk in raw_data if key in kk and units in kk]
if len(matches) == 0:
raise KeyError('<geopm> geopmpy.io: Field not found: {}'.format(key))
match = sorted(matches)[0][1]
return copy.deepcopy(raw_data[match])
class RawReportCollection(object):
'''
Used to group together a collection of related RawReports.
'''
def __init__(self, report_paths, dir_name='.', dir_cache=None, verbose=True, do_cache=True):
self._reports_df = | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 7 21:33:48 2021
@author: David
"""
import sys
sys.path.append('.')
# import os
# import inspect
from datetime import date
from pathlib import Path
import locale
import pandas as pd
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib.ticker import MultipleLocator
from matplotlib import gridspec
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
import fig_util
from IPython.display import display, Image
INPUT_PATH = r'..\data\RKI\Hospitalisierungen'
OUTPUT_PATH = r'..\output\Hospitalization_Nowcast2'
FILE_PATTERN = '{year:04d}-{month:02d}-{day:02d}_Deutschland_COVID-19-Hospitalisierungen.csv'
START_DATE = '2021-07-29' #'2021-08-01'
END_DATE = date.today().strftime('%Y-%m-%d') # '2021-11-12'
MAX_TRI_LEN = 21
ALL_DATE_RANGE = ['2020-03-03', END_DATE]
#END_DATE = '2021-11-09'
BL_FILTER = 'Thรผringen'
BL_FILTER = 'Sachsen' # 200
BL_FILTER = 'Rheinland-Pfalz' # 80
BL_FILTER = 'Berlin' # 160
BL_FILTER = 'Schleswig-Holstein' # 90
BL_FILTER = 'Brandenburg' # 160
BL_FILTER = 'Hessen' # 140
BL_FILTER = 'Niedersachsen' # 70
BL_FILTER = 'Hamburg' # 120
BL_FILTER = 'Baden-Wรผrttemberg' # 100
BL_FILTER = 'Nordrhein-Westfalen' # 100
BL_FILTER = 'Bayern' # 140
BL_FILTER = 'Bundesgebiet' # 100
yscale_table = {
'00-04': 4,
'05-14': 2.5,
'15-34': 7,
'35-59': 12,
'60-79': 25,
'80+': 60,
'00+': 15,
'all': 100
}
DO_SEPERATE_TOTAL = True
# SHOW_ONLY_THESE_AG = None
# SHOW_ONLY_THESE_AG = [
# '35-59',
# '60-79',
# '80+',
# '00+'
# ]
SHOW_ONLY_THESE_AG = [
'00+'
]
if DO_SEPERATE_TOTAL:
AG_LIST = [
'00-04',
'05-14',
'15-34',
'35-59',
'60-79',
'80+'
]
else:
AG_LIST = [
'00-04',
'05-14',
'15-34',
'35-59',
'60-79',
'80+',
'00+'
]
SLATE = (0.15, 0.15, 0.15)
# POP_LUT = {
# '00-04': 39.69100,
# '05-14': 75.08700,
# '15-34': 189.21300,
# '35-59': 286.66200,
# '60-79': 181.53300,
# '80+': 59.36400,
# '00+': 831.55000
# }
ytck_table = {
'00-04': 0.1,
'05-14': 0.05,
'15-34': 0.2,
'35-59': 0.25,
'60-79': 0.5,
'80+': 1,
'00+': 0.25,
'all': 2
}
plt_col_table = {
'00-04': (0.8, 0.0, 0.8),
'05-14': (0, 0.5, 0.5),
'15-34': (1, 0.7, 0),
'35-59': (1, 0, 0),
'60-79': (0.6, 0.6, 1),
'80+': (0, 0, 1),
'00+': (0, 0, 0)
}
# %%
plt.rc('axes', axisbelow=True)
locale.setlocale(locale.LC_TIME, 'de-DE')
assert(Path(INPUT_PATH).is_dir())
Path(OUTPUT_PATH).mkdir(parents=True, exist_ok=True)
POP_LUT = | pd.read_csv(r'../data/LUT/Bundeslaender2.tsv', sep='\t', comment='#', index_col='Gebiet') | pandas.read_csv |
"""
<NAME>, <EMAIL>
<NAME>, <EMAIL>
seoulai.com
2018
"""
import pandas as pd
from seoulai_gym.envs.traders.base import Constants
import os
class Price(Constants):
def __init__(
self,
price_list_size: int=1000, # trading game size
tick: int=0,
):
"""Price constructor.
Args:
size: Price length.
"""
self.stock_total_volume = 2000
self.init()
def init(
self,
) -> None:
"""Initialize trading data set.
TODO: add volume, crypto networking value and etc...
"""
price_file = os.path.abspath(os.path.join(
os.path.dirname(__file__), "bitcoin_price.csv"))
df = | pd.read_csv(price_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 15:33:46 2018
@author: <NAME>
"""
import cantera as ct
from .. import simulation as sim
from ...cti_core import cti_processor as ctp
import pandas as pd
import numpy as np
import time
import copy
import re
class JSR_steadystate(sim.Simulation):
'''Child class of sim.Simulation. Inherits all attributes and methods including __init__().
Also has an internal init due to data requirements. JSR_steadystate is a single jet-stirred-reactor simulation
and may be used to simulate a jet-stirred-reactor experiment at a single set of constant temperature, pressure, and
mole fraction conditions.'''
def __init__(self,pressure:float,temperature:float,observables:list,
kineticSens:int,physicalSens:int,conditions:dict,thermalBoundary,mechanicalBoundary,
processor:ctp.Processor=None,cti_path="",
save_physSensHistories=0,moleFractionObservables:list=[],
absorbanceObservables:list=[],concentrationObservables:list=[],
fullParsedYamlFile:dict={},residence_time:float=1.0,pvalveCoefficient:float=0.01,
maxpRise:float=0.001,save_timeHistories:int=0,rtol:float=1e-14,atol:float=1e-15):
#sim.Simulation.__init__(self,pressure,temperature,observables,kineticSens,physicalSens,
# conditions,processor,cti_path)
#set up processor and initialize all variables common to every simulation
if processor!=None and cti_path!="":
print("Error: Cannot give both a processor and a cti file path, pick one")
elif processor==None and cti_path=="":
print("Error: Must give either a processor or a cti file path")
if processor != None:
self.processor = processor
elif cti_path!="":
self.processor = ctp.Processor(cti_path)
self.pressure=pressure
'''Pressure of the reactor in atm'''
self.temperature=temperature
'''Temperature of the reactor in K'''
self.observables=observables
'''Observables to be used in kinetic sensitivity analysis. Required only if `self.kineticSens` is set to 1'''
self.kineticSens=kineticSens
'''Set to 1 to run kinetic sensitivity analysis, 0 otherwise'''
self.physicalSens=physicalSens
'''Deprecated'''
self.conditions=conditions
'''Mole fractions for the gas flow into the reactor. Accepts a dictionary similarly to {\'H2\':0.1,\'O2\':0.1,\'Ar\':0.8}'''
self.cti_path=cti_path
'''Full file path for Cantera cti file. Only needed if `self.processor` has not been defined.'''
self.thermalBoundary = thermalBoundary
'''Deprecated '''
self.mechanicalBoundary = mechanicalBoundary
'''Deprecated'''
self.kineticSensitivities= None
'''Stores an array of the kinetic sensitivities after calculation'''
self.experimentalData = None
'''Deprecated'''
self.concentrationObservables = concentrationObservables
self.moleFractionObservables = moleFractionObservables
self.absorbanceObservables = absorbanceObservables
self.fullParsedYamlFile = fullParsedYamlFile
self.pvalveCoefficient=pvalveCoefficient
'''Controls the sensitivity of the pressure valve in the reactor network. For most use cases, leave default value as is.
Only adjust if final pressure substantially deviates from value set in `self.pressure`'''
self.maxPrise=maxpRise
'''Max pressure rise allowed. Leave unchanged in most use cases.
Only adjust if final pressure substantially deviates from value set in `self.pressure`'''
self.energycon='off'
'''Always set to \'off\' '''
self.residence_time=residence_time
'''Residence time of the reactor'''
self.timeHistory = None
self.experimentalData = None
if save_timeHistories == 1:
self.timeHistories=[]
self.timeHistoryInterpToExperiment = None
self.pressureAndTemperatureToExperiment = None
else:
self.timeHistories=None
if save_physSensHistories == 1:
self.physSensHistories = []
self.setTPX()
self.dk = 0.01
self.rtol=rtol
self.atol=atol
self.solution=None
def set_geometry(self,volume=0.1):
self.reactor_volume=volume
def printVars(self):
print()
def settingJSRConditions(self):
'''
Determine the thermal and mechanical boundary conditions for the
jet-stirred reactor.
'''
#assigning the thermal boundary variable
if re.match('[aA]diabatic',self.thermalBoundary):
energy = 'on'
elif re.match('[iI]sothermal',self.thermalBoundary):
energy = 'off'
else:
raise Exception('Please specify a thermal boundary condition, adiabatic or isothermal')
#assigning the mechanical boundary variable
if re.match('[Cc]onstant [Pp]ressure',self.mechanicalBoundary):
mechBoundary = 'constant pressure'
elif re.match('[Cc]onstant [Vv]olume',self.mechanicalBoundary):
mechBoundary = 'constant volume'
else:
raise Exception('Please specify a mechanical boundary condition, constant pressure or constant volume')
#return the thermal and mechanical boundary of the jet-stirred reactor
return energy,mechBoundary
def sensitivity_adjustment(self,temp_del:float=0.0,
pres_del:float=0.0,
spec_pair:(str,float)=('',0.0)):
#this is where we would make the dk fix
if temp_del != 0.0:
self.dk.append(temp_del)
if pres_del != 0.0:
self.dk.append(pres_del)
if spec_pair[1] != 0.0:
self.dk.append(spec_pair[1])
kin_temp = self.kineticSens
self.kineticSens = 0
data = sim.Simulation.sensitivity_adjustment(self,temp_del,pres_del,spec_pair)
self.kineticSens = kin_temp
return data
def run_single(self):
gas=self.processor.solution
reactorPressure=gas.P
self.reactorPressure=self.processor.solution.P
pressureValveCoefficient=self.pvalveCoefficient
maxPressureRiseAllowed=self.maxPrise
print(maxPressureRiseAllowed,self.reactorPressure,pressureValveCoefficient)
#Build the system components for JSR
pretic=time.time()
if bool(self.observables) and self.kineticSens==1:
###################################################################
#Block to create temp reactor network to pre-solve JSR without kinetic sens
ct.suppress_thermo_warnings()
tempgas=ct.Solution(self.processor.cti_path)
tempgas.TPX=self.processor.solution.TPX
tempfuelAirMixtureTank=ct.Reservoir(tempgas)
tempexhaust=ct.Reservoir(tempgas)
tempstirredReactor=ct.IdealGasReactor(tempgas,energy=self.energycon,
volume=self.reactor_volume)
tempmassFlowController=ct.MassFlowController(upstream=tempfuelAirMixtureTank,
downstream=tempstirredReactor,
mdot=tempstirredReactor.mass/self.residence_time)
tempPressureRegulator=ct.Valve(upstream=tempstirredReactor,downstream=tempexhaust,
K=pressureValveCoefficient)
tempreactorNetwork=ct.ReactorNet([tempstirredReactor])
tempreactorNetwork.rtol = self.rtol
tempreactorNetwork.atol = self.atol
print(self.rtol,self.atol)
tempreactorNetwork.advance_to_steady_state()
###################################################################
#reactorNetwork.advance_to_steady_state()
#reactorNetwork.reinitialize()
elif self.kineticSens and bool(self.observables)==False:
#except:
print('Please supply a non-empty list of observables for sensitivity analysis or set kinetic_sens=0')
pretoc=time.time()
print('Presolving Took {:3.2f}s to compute'.format(pretoc-pretic))
fuelAirMixtureTank=ct.Reservoir(self.processor.solution)
exhaust=ct.Reservoir(self.processor.solution)
if bool(self.observables) and self.kineticSens==1:
stirredReactor=ct.IdealGasReactor(tempgas,energy=self.energycon,
volume=self.reactor_volume)
else:
stirredReactor=ct.IdealGasReactor(self.processor.solution,energy=self.energycon,
volume=self.reactor_volume)
#stirredReactor=ct.IdealGasReactor(self.processor.solution,energy=self.energycon,
# volume=self.reactor_volume)
massFlowController=ct.MassFlowController(upstream=fuelAirMixtureTank,
downstream=stirredReactor,
mdot=stirredReactor.mass/self.residence_time)
pressureRegulator=ct.Valve(upstream=stirredReactor,downstream=exhaust,K=pressureValveCoefficient)
reactorNetwork=ct.ReactorNet([stirredReactor])
if bool(self.observables) and self.kineticSens==1:
for i in range(gas.n_reactions):
stirredReactor.add_sensitivity_reaction(i)
reactorNetwork.rtol_sensitivity=0.0000001
reactorNetwork.atol_sensitivity=0.00000001
print('Sens tols:'+str(reactorNetwork.atol_sensitivity)+', '+str(reactorNetwork.rtol_sensitivity))
# now compile a list of all variables for which we will store data
columnNames = [stirredReactor.component_name(item) for item in range(stirredReactor.n_vars)]
columnNames = ['pressure'] + columnNames
# use the above list to create a DataFrame
timeHistory = | pd.DataFrame(columns=columnNames) | pandas.DataFrame |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
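## Minimal illustrative sketch (added for clarity; not the module's actual
## search implementation): recursively collect every value stored under `key`
## anywhere inside a nested dict/list structure, as described in the module
## docstring above.
def simple_q(key, obj):
    """Return a list of all values found under `key` in a jsonable structure."""
    hits = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                hits.append(v)
            hits.extend(simple_q(key, v))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            hits.extend(simple_q(key, item))
    return hits

## e.g. simple_q('id', ex1) == ['hello', 'gbye'] and
## simple_q('url', ex1) == ['url1', 'url2', 'url3']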
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), | u('value') | pandas.compat.u |
import numpy as np, pandas as pd, os
from ..measure.bootstrap import *
from ..measure.filter_topological_events import *
from ..measure.compute_forces_at_annihilation import *
# from ..utils.utils_traj import get_tips_in_range
import random
#####################################################
# Methods conditioned on data from topological events
#####################################################
def comp_mean_radial_velocities(df,input_fn,remove_before_jump,minR_thresh,max_speed_thresh,t_col='tdeath',id_col='event_id',
bins='auto',min_numobs=None,num_samples=1000,flip_time=False,use_smoothing=True,tavg1=1.5,tavg2=2.5,printing=False,**kwargs):
'''returns a dict containing results for mean radial velocities.
computes the mean radial velocities, binning by radius.
supposes df is from a .csv file containing annihilation or creation results,
where rows are presorted according to event and then by t_col.
minR_thresh is in cm and max_speed_thresh is in cm/ms.
navg is the number of frames to average over.
if min_numobs is None, then min_numobs is determined from the mean counts in each bin.
- output time units is in milliseconds
- output length units is the same as input length units
Example Usage:
dict_out=comp_mean_radial_velocities(df,input_fn,remove_before_jump,minR_thresh,max_speed_thresh,t_col='tdeath')
'''
df.sort_values([id_col,t_col],ascending=False,inplace=True)
event_id_lst=sorted(set(df[id_col].values))
if flip_time:
df[t_col]=-1*df[t_col]
tvals=sorted(set(df[t_col].values))
DT=tvals[1]-tvals[0]
assert(DT>0)#if DT<0, then a factor of -1 is needed in a few places...
if not use_smoothing:
df['drdt']=df['r'].diff()/DT
#set drdt to zero where pid changes or where tdeath jumps by more than dt
# boo=df[t_col].diff()!=-DT
boo=~np.isclose(df[t_col].diff(),-DT,5)
if remove_before_jump:
#remove any observations occurring before a jump
book =(df['r']>=minR_thresh)&(np.abs(df['drdt'])>=max_speed_thresh)
for event_id in event_id_lst:
#identify any jumps in this event
booki =book&(df[id_col]==event_id)
if booki[booki].any(): #if there are any jumps
#identify the earliest time where a jump occurs
max_time=df[booki][t_col].min()
# filters all positions occuring before the final jump for a single annihilation event.
bookie=(df[id_col]==event_id)&(df[t_col]>=max_time) #True if a row should be dropped
#mark all data for this event to be dropped if it occurs earlier than this time (tdeath is larger than the earliest time)
boo |= bookie
# boo&=df['drdt']>0 #when this is uncommented, the data looks good. when it is commented, too much is filtered :(
df.loc[boo,'drdt']=np.nan
df.dropna(inplace=True)
else:
#perform smoothed differentiation for each event_id
navg1=int(tavg1/DT)
navg2=int(tavg2/DT)
if navg2%2==0:
navg2=navg2+1 #second window must be an odd integer
if printing:
print(f"using smoothing windows navg1,navg2={navg1,navg2}, corresponding to tavg1,tavg2=({navg1*DT:.3f},{navg2*DT:.3f}) ms...")
df,valid_event_id_lst=get_annihilation_df(input_fn,navg1,navg2,
t_col = t_col,id_col = id_col,DT = DT,DT_sec=DT*0.001,printing = printing,**kwargs)
#drop invalid events inplace
event_id_lst=sorted(set(df[id_col].values))
invalid_event_id_lst=list(set(event_id_lst).difference(set(valid_event_id_lst)))
for event_id in invalid_event_id_lst:
boo=df[id_col]==event_id
df.loc[boo,'drdt']=np.nan
df.dropna(inplace=True)
#implement measure of dRdt that explicitely bins by radius
counts,r_edges=np.histogram(df.r.values,bins=bins)
range_values=r_edges
if min_numobs is None:
min_numobs=np.mean(counts)/8
r_lst=[];drdt_lst=[];Delta_r_lst=[];Delta_drdt_lst=[];
count_lst=[];p_r_lst=[];p_drdt_lst=[]
for j in range(r_edges.shape[0]-1):
numobs=counts[j]
if numobs>min_numobs:
boo=(df.r>=r_edges[j])&(df.r<r_edges[j+1])
dfb=df[boo]
r_values=dfb.r.values
drdt_values=dfb.drdt.values
#compute mean values in bin
r=np.mean(r_values)
drdt=np.mean(drdt_values)
# compute 95% CI for mean
Delta_r,p_r=bootstrap_95CI_Delta_mean(r_values,
num_samples=num_samples)
Delta_drdt,p_drdt=bootstrap_95CI_Delta_mean(drdt_values,
num_samples=num_samples)
#append results to list
r_lst.append(r)
drdt_lst.append(drdt)
Delta_r_lst.append(Delta_r)
Delta_drdt_lst.append(Delta_drdt)
p_r_lst.append(p_r)
p_drdt_lst.append(p_drdt)
count_lst.append(numobs)
r_values=np.array(r_lst)
drdt_values=np.array(drdt_lst)
Delta_r_values=np.array(Delta_r_lst)
Delta_drdt_values=np.array(Delta_drdt_lst)
p_r_values=np.array(p_r_lst)
p_drdt_values=np.array(p_drdt_lst)
count_values=np.array(count_lst)
dict_out={
'r':r_values,
'drdt':drdt_values,
'Delta_r':Delta_r_values,
'Delta_drdt':Delta_drdt_values,
'p_r':p_r_values,
'p_drdt':p_drdt_values,
'counts':count_values
}
return dict_out
# def save_mean_radial_velocities(input_fn,t_col='tdeath',output_fn=None,bins='auto',flip_time=False,
# remove_before_jump=True,minR_thresh=0.25,max_speed_thresh=0.4,**kwargs):
def save_mean_radial_velocities(input_fn,remove_before_jump,minR_thresh,max_speed_thresh,t_col='tdeath',output_fn=None,bins='auto',flip_time=False,**kwargs):
if output_fn is None:
output_fn=input_fn.replace('.csv',f'_mean_radial_velocities_bins_{bins}_minRthresh_{minR_thresh}_maxspeedthresh_{max_speed_thresh}.csv')
df=pd.read_csv(input_fn)
dict_out=comp_mean_radial_velocities(df=df,input_fn=input_fn,t_col=t_col,bins=bins,
flip_time=flip_time,minR_thresh=minR_thresh,
remove_before_jump=remove_before_jump,
max_speed_thresh=max_speed_thresh,**kwargs)
df_drdt=pd.DataFrame(dict_out)
df_drdt.to_csv(output_fn,index=False)
return os.path.abspath(output_fn)
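# Illustrative usage sketch (not part of the original module): the input CSV
# name and thresholds below are assumed example values (the thresholds mirror
# the defaults in the commented-out signature above).
def _example_save_mean_radial_velocities():
    out_path = save_mean_radial_velocities(
        'annihilations.csv',            # hypothetical annihilation results file
        remove_before_jump=True,
        minR_thresh=0.25,               # cm
        max_speed_thresh=0.4,           # cm/ms
        t_col='tdeath',
        bins='auto')
    return pd.read_csv(out_path)        # columns: r, drdt, Delta_r, Delta_drdt, ...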
#########################################################
# Methods not conditioned on data from topological events
#########################################################
def get_ranges_to_others(xy_self,xy_others, pid_others, distance_L2_pbc,dist_thresh):
pid_lst = []
R_lst=[]
xy_self=xy_self[0]
for j,pid_other in enumerate(pid_others):
dist = distance_L2_pbc ( xy_others[j], xy_self)
if dist<dist_thresh:
pid_lst.append ( int(pid_other) )
R_lst.append ( float(dist) )
return np.array(R_lst), np.array(pid_lst)
def comp_radial_velocities_between_frames(df,frame,frame_nxt,distance_L2_pbc,dist_thresh,pid_col,
DS,DT,pid=None,use_forward_R=False):
#get data in the current frame
dff=df[df.frame==frame]
xy_values=dff[['x','y']].values
pid_values=dff[pid_col].values
if pid is None:
#(optional) randomly pick 1 particle from the current frame
pid=random.choice(pid_values)
#compute the range to each tip in the current frame
xy_self=xy_values[pid_values==pid]
boo=pid_values!=pid
pid_others=pid_values[boo]
xy_others=xy_values[boo]
# pid_in_range=get_tips_in_range(xy_self,xy_others, pid_others, distance_L2_pbc, dist_thresh=dist_thresh)
R_lst, pid_lst=get_ranges_to_others(xy_self,xy_others, pid_others, distance_L2_pbc,dist_thresh)
#get data in the next frame
dff=df[df.frame==frame_nxt]
xy_values=dff[['x','y']].values
pid_values=dff[pid_col].values
R_out_lst=[]
dRdt_out_lst=[]
#if the randomly selected particle is still present
if (pid==pid_values).any():
xy_self=xy_values[pid_values==pid]
#for each pid_other in pid_lst
for j,pid_other in enumerate(pid_lst):
#if the other particle is still present
boo=pid_other==pid_values
if boo.any():
#get the next location of that other particle
xy_other=xy_values[boo]
#compute the range between those tips in the next frame
R_nxt=distance_L2_pbc ( xy_other[0], xy_self[0])
R_prv=R_lst[j]
#compute dRdt and average R for those tips
dRdt_out=DS*(R_nxt-R_prv)/DT
#optionally, measure range from previous time point only
if use_forward_R:
R_out=R_prv
else:
R_out=DS*0.5*(R_nxt+R_prv)
#append results to list
R_out_lst.append(R_out)
dRdt_out_lst.append(dRdt_out)
#TODO(later, optionally): mark rows that have been visited as visited
return R_out_lst, dRdt_out_lst
def comp_neighboring_radial_velocities_between_frames(df,frame,num_frames_between,distance_L2_pbc,dist_thresh,DS,DT,pid_col='particle',**kwargs):
    '''Computes radial velocities between frame and frame+num_frames_between for particles that are
    nearest to each other; only neighbors closer than dist_thresh are considered, and for each particle
    the R and dRdt of its nearest neighbor are kept.
    Double counting is removed by rounding to 12 digits before dropping duplicates, which guards against
    floating point arithmetic error (14 digits seemed to work, but 12 is safer).
    Example Usage:
    R_values, dRdt_values = comp_neighboring_radial_velocities_between_frames(df, frame=frame,
        num_frames_between=1, distance_L2_pbc=distance_L2_pbc, dist_thresh=dist_thresh, DS=DS, DT=DT)
    '''
frame_values=np.array(sorted(set(df.frame.values)))
#get data in the current frame
dff=df[df.frame==frame]
xy_values=dff[['x','y']].values
pid_values=dff[pid_col].values
R_lst=[];dRdt_lst=[]
for pid in pid_values:
_R_lst,_dRdt_lst = comp_radial_velocities_between_frames(df,frame=frame,frame_nxt=frame+num_frames_between,pid=pid,DS=DS,DT=DT,distance_L2_pbc=distance_L2_pbc,dist_thresh=dist_thresh,pid_col=pid_col)
if len(_R_lst)>0:
Rmin=np.min(_R_lst)
arg=np.argmin(_R_lst)
dRdtmin=_dRdt_lst[arg]
R_lst.append(Rmin)
dRdt_lst.append(dRdtmin)
    #remove duplicates; round to 12 digits first to guard against floating point arithmetic error
    d=pd.DataFrame({'R':R_lst,'dRdt':dRdt_lst}).round(12).drop_duplicates()
    return d['R'].values, d['dRdt'].values
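# Usage sketch (df, DS, DT and the periodic-boundary distance function are assumed to be supplied by the caller):
# R_values, dRdt_values = comp_neighboring_radial_velocities_between_frames(
#     df, frame=0, num_frames_between=1, distance_L2_pbc=distance_L2_pbc,
#     dist_thresh=dist_thresh, DS=DS, DT=DT)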
import json
import pickle
import matlab
import scipy
import numpy as np
import os
import yaml
from EDL.dialogue.MatEngine_object import eng1
from EDL.dialogue.func_helpers import CalculateFuncs, ScorecardDataFrameFuncs, get_variable_info, correlation_multiprocessing
from EDL.models import EDLContextScorecards
from daphne_context.models import UserInformation
import pandas as pd
from multiprocessing import Pool
def load_mat_files(mission_name, mat_file, context: UserInformation):
file_path = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files/', mission_name, mat_file)
context.edlcontext.current_mat_file = file_path
context.edlcontext.current_mat_file_for_print = mat_file
context.edlcontext.current_mission = mission_name
context.edlcontext.save()
context.save()
''' ---------------For MATLAB Engine ------------------'''
eng1.addpath(os.path.join('/Users/ssantini/Code/EDL_Simulation_Files/', mission_name), nargout = 0)
mat_file_engine = eng1.load(mat_file)
# TODO: ADD VARIABLE OF INTEREST TO ENGINE, NOT WHOLE MATFILE
# eng1.workspace['dataset'] = mat_file_engine
# eng1.disp('esto', nargout = 0)
print('The current mat_file is:')
print(mat_file)
return 'file loaded'
def mat_file_list(mission_name, context: UserInformation):
file_path = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files/', mission_name)
mat_files = os.listdir(file_path)
result = []
for mat_file in mat_files:
result.append(
{
'command_result':mat_file
}
)
return result
def compute_stat(mission_name,mat_file, param_name, context: UserInformation):
eng1.addpath('/Volumes/Encrypted/Mars2020/mars2020/MATLAB/', nargout=0)
eng1.addpath('/Users/ssantini/Code/ExtractDataMatlab/MatlabEngine/', nargout=0)
if mission_name == 'None': # if query uses context, just use the file path in context
file_path = mat_file
else:
file_path = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files/', mission_name, mat_file)
##################### CHECK IF IT IS A SCORECARD METRIC ###########################################
scorecard_query = EDLContextScorecards.objects.filter(scorecard_name__exact=os.path.basename(file_path).replace(".mat", ".yml"),
edl_context_id__exact=context.edlcontext.id)
if scorecard_query.count() > 0:
scorecard = scorecard_query.first()
scorecard_labeled = pickle.loads(scorecard.current_scorecard_df)
sub_df = scorecard_labeled.loc[scorecard_labeled['metric_name'].str.lower() == param_name]
if sub_df.shape[0] > 0:
units = sub_df['units'].ravel().tolist()[0]
calculation_string = sub_df['calculation'].ravel().tolist()[0]
list_for_load, warning = CalculateFuncs.equation_parser(calculation_string, mat_file)
''' Equations to calculate and remove the things left to the equal side '''
eqs_to_calc = calculation_string.split(';')
''' Load variables into workspace'''
[eng1.load(mat_file, item, nargout=0) for item in list_for_load] # load each
[eng1.workspace[item] for item in list_for_load] # add each to workspace
for item in eqs_to_calc:
eng1.eval(item, nargout=0)
val2 = eng1.workspace['ans']
param_array = np.array(val2)
warning = 'Scorecard Metric'
else:
warning = 'Not Scorecard Metric'
if warning == 'Not Scorecard Metric':
edl_mat_load = eng1.load(file_path, param_name, nargout=0) # loads in engine
dict_NL = json.load(open("/Users/ssantini/Code/ExtractDataMatlab/ExtractSimDataUsingNL/sim_data_dict.txt"))
        key = param_name
        if key in dict_NL:
            # map the natural-language name to the variable name used in the mat file
            param_name = dict_NL[key][0]
param_array = np.array(eng1.workspace[param_name])
val2 = eng1.workspace[param_name]
max = np.amax(param_array)
min = np.amin(param_array)
mean = np.mean(param_array)
variance = np.var(param_array)
std_dev = np.std(param_array)
plus_three_sig = np.mean(param_array) + 3 * np.std(param_array)
minus_three_sig = np.mean(param_array) - 3 * np.std(param_array)
percentile013 = np.percentile(param_array, 0.13)
percentile1 = np.percentile(param_array, 1)
percentile10 = np.percentile(param_array, 10)
percentile50 = np.percentile(param_array, 50)
percentile99 = np.percentile(param_array, 99)
percentile99_87 = np.percentile(param_array, 99.87)
high99_87_minus_median = np.percentile(param_array, 99.87) - np.median(param_array)
    high99_87_minus_mean = np.percentile(param_array, 99.87) - np.mean(param_array)
median_minus_low_99_87 = np.median(param_array) - np.percentile(param_array, 0.13)
mean_minus_low_99_87 = np.mean(param_array) - np.percentile(param_array, 0.13)
    name_of_stat = ["max", "min", "mean", "variance", "std", "3s", "mean", "-3s", "0.13%", "1.00%", "10.00%", "50.00%",
                    "99.00%",
                    "99.87%", "high 99.87 - median", "high 99.87 - mean", "median - low 99.87",
                    "mean - low 99.87"]
value_of_stat = [max, min, mean, variance, std_dev, plus_three_sig, mean, minus_three_sig, percentile013,
percentile1,
percentile10, percentile50, percentile99, percentile99_87, high99_87_minus_median,
high99_87_minus_mean,
median_minus_low_99_87, mean_minus_low_99_87]
'''Now we want to create a list as the one in the list sim data query'''
stat = []
for name, value in zip(name_of_stat, value_of_stat):
stat.append(
{
'command_result': " = ".join([name, value.astype(str)])
}
)
my_list = []
for _ in range(val2.size[1]):
my_list.append(val2._data[_ * val2.size[0]:_ * val2.size[0] + val2.size[0]].tolist())
return stat, my_list[0]
def load_scorecard(mission_name, mat_file, context: UserInformation):
# ''' Get Scorecard path'''
if mission_name == 'None':
file_to_search = os.path.basename(mat_file.replace(".mat", ".yml"))
else:
file_to_search = mat_file.replace(".mat", ".yml")
# ''' Check if scorecard exists in the Database'''
all_scorecards = EDLContextScorecards.objects
scorecard_query = EDLContextScorecards.objects.filter(scorecard_name__exact=file_to_search,
edl_context_id__exact=context.edlcontext.id)
if scorecard_query.count() > 0:
scorecard = scorecard_query.first()
return 'Scorecard already exists, and loaded'
# '''Check if scorecard exists already and just save scorecard path'''
if os.path.exists(os.path.join("/Users/ssantini/Code/Code_Daphne/daphne_brain/EDL/data/scorecards", file_to_search)) == True:
i = 1
scorecard_path = os.path.join('/Users/ssantini/Code/Code_Daphne/daphne_brain/EDL/data/scorecards', file_to_search)
else:
        ''' Set paths:
        1. mat file path; 2. the scorecard template path
        '''
if mission_name == 'None':
mat_file_path = mat_file # this is actually a path
else:
mat_file_path = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files', mission_name, mat_file)
''' Connect to the local computer and generate scorecard'''
os.system('setenv DYLD_FALLBACK_LIBRARY_PATH $LD_LIBRARY_PATH')
os.system('~/scorecard.rb --help')
os.environ['MATLAB_PATH'] = "/Volumes/Encrypted/Mars2020/mars2020/MATLAB/"
os.system('pwd')
#print(os.environ['MATLAB_PATH'])
#os.system(('~/scorecard.rb --yaml --template="/Users/ssantini/Code/Code_Daphne/daphne_brain/daphne_API/edl/ScorecardMaterials/ScoreCardTemplate.xlsx"') + ' --path=' + mat_file_path)
os.system('~/scorecard.rb -y --template=/Volumes/Encrypted/Mars2020/mars2020/EDLccss/ScoreCardTemplate.xlsx --path=/Volumes/Encrypted/Mars2020/mars2020/MATLAB/' + ' ' + mat_file_path)
''' Rename the Scorecard to the mat file'''
scorecard_temp_path = mat_file.replace(".mat", "")
scorecard_name = os.path.basename(scorecard_temp_path)+'.yml'
scorecard_path = os.path.join('/Users/ssantini/Code/Code_Daphne/daphne_brain/EDL/data/scorecards', scorecard_name)
if os.path.isfile('/Users/ssantini/Code/Code_Daphne/daphne_brain/scorecard.yml'):
os.rename('/Users/ssantini/Code/Code_Daphne/daphne_brain/scorecard.yml', scorecard_path)
with open(scorecard_path, encoding='utf-8') as scorecard_file:
scorecard_dict = yaml.load(scorecard_file)
scorecard_df_labeled = pd.DataFrame(scorecard_dict)
scorecard_df_labeled = scorecard_df_labeled[~scorecard_df_labeled[':sheet'].str.contains("FLAG FAIL")]
scorecard_df_labeled = scorecard_df_labeled[~scorecard_df_labeled[':calculation'].str.contains('haz_filename')]
scorecard_df_labeled = scorecard_df_labeled[~scorecard_df_labeled[':calculation'].str.contains('lvs_error_x_fesn')]
scorecard_df_labeled.columns = ['metric_name', 'type', 'units', 'calculation', 'direction', 'flag', 'out_of_spec', 'evalString', 'post_results', 'color','status', 'sheet_name']
scorecard_df_labeled['status'] = scorecard_df_labeled['status'].replace([':grey', ':green'], 'ok')
scorecard_df_labeled['status'] = scorecard_df_labeled['status'].replace([':yellow'], 'flagged')
scorecard_df_labeled['status'] = scorecard_df_labeled['status'].replace([':red'], 'out_of_spec')
flagged_df = scorecard_df_labeled[scorecard_df_labeled.status == 'flagged']
out_of_spec_df = scorecard_df_labeled[scorecard_df_labeled.status == 'out_of_spec']
out_of_spec_arrays = ScorecardDataFrameFuncs.get_scorecard_arrays(out_of_spec_df, mat_file, False)
out_of_spec_df['arrays'] = out_of_spec_arrays
# db_template = ScorecardDataFrameFuncs.scorecard_df_for_db(mat_file_path, context)
scorecard_df_bytes = pickle.dumps(scorecard_df_labeled)
out_of_spec_df_bytes = pickle.dumps(out_of_spec_df)
flag_df_bytes = pickle.dumps(flagged_df)
# db_template = pickle.dumps(db_template)
metrics_of_interest = list(flagged_df['metric_name']) + list(out_of_spec_df['metric_name'])
context.edlcontext.current_metrics_of_interest = json.dumps(metrics_of_interest)
context.edlcontext.save()
new_scorecard = EDLContextScorecards(scorecard_name= os.path.basename(scorecard_path),
current_scorecard_path= scorecard_path,
current_scorecard_df = scorecard_df_bytes,
current_scorecard_df_flag = flag_df_bytes,
current_scorecard_df_fail = out_of_spec_df_bytes,
edl_context=context.edlcontext)
new_scorecard.save()
context.save()
return 'Score Card Loaded and Populated'
def get_scorecard_post_results(edl_scorecard, scorecard_post_param, context: UserInformation):
if edl_scorecard == 'None':
scorecard_query = EDLContextScorecards.objects.filter(
scorecard_name__exact=os.path.basename(context.edlcontext.current_mat_file).replace(".mat", ".yml"),
edl_context_id__exact=context.edlcontext.id)
if scorecard_query.count() > 0:
scorecard = scorecard_query.first()
scorecard_df = pickle.loads(scorecard.current_scorecard_df)
sub_df = scorecard_df.loc[scorecard_df['metric_name'].str.lower() == scorecard_post_param.lower()]
else:
        current_scorecard = edl_scorecard.replace('.mat', '.yml')
with open(os.path.basename(edl_scorecard), encoding='utf-8') as scorecard_file:
scorecard_dict = yaml.load(scorecard_file)
scorecard_df = ScorecardDataFrameFuncs.generate_scorecard_dataframe(scorecard_dict)
'''Search in dictionary what is contained '''
possible_metrics = scorecard_df.metric_name.str.contains(str(scorecard_post_param), case = False, na = False)
indexes_in_df = possible_metrics[possible_metrics == True].index.tolist()
post_results = scorecard_df.iloc[indexes_in_df]
post_results_list = []
for row in post_results.itertuples():
post_results_list.append(
{
'command_result': " ".join([str(row.metric_name), str('='), str(row.post_results), str(row.units), "(",
str(row.type), ")"])
}
)
return post_results_list
def get_flag_summary(edl_scorecard, mat_file, context: UserInformation, *flag_type):
if edl_scorecard == 'None':
file_to_search = os.path.basename(mat_file.replace(".mat", ".yml"))
scorecard_query = EDLContextScorecards.objects.filter(scorecard_name__exact=file_to_search)
if scorecard_query.count() > 0:
scorecard = scorecard_query.first()
scorecard_df = scorecard.current_scorecard_df
flagged_df = pickle.loads(scorecard.current_scorecard_df_flag)
out_of_spec_df = pickle.loads(scorecard.current_scorecard_df_fail)
scorecard_df = pickle.loads(scorecard_df)
else:
scorecard_query = EDLContextScorecards.objects.filter(scorecard_name__exact=edl_scorecard)
if scorecard_query.count() > 0:
scorecard = scorecard_query.first()
scorecard_df = scorecard.current_scorecard_df
flagged_df = pickle.loads(scorecard.current_scorecard_df_flag)
out_of_spec_df = pickle.loads(scorecard.current_scorecard_df_fail)
scorecard_df = pickle.loads(scorecard_df)
''' Now we want to get what metrics are flagged and which are out of spec as a list'''
if 'flagged_results' in flag_type:
flagged_list = []
for row in flagged_df.itertuples():
flagged_list.append(
{
'command_result':" ".join([str(row.metric_name), str(row.post_results), str(row.units), "(",
str(row.type), ")",
str(row.direction), str(row.flag), str(row.units), 'is not satisfied'])
}
)
return flagged_list
if 'outofspec_results' in flag_type:
outofspec_list = []
for row in out_of_spec_df.itertuples():
outofspec_list.append(
{
'command_result': " ".join([str(row.metric_name), str(row.post_results), str(row.units), "(",
str(row.type), ")",
str(row.direction), str(row.out_of_spec), str(row.units),'is not satisfied'])
}
)
        # TODO: also report how many and what percent of cases are out of spec
return outofspec_list
# TODO: add function for calculating the metrics from a matfile (5007)
def calculate_scorecard_metric(mat_file, edl_scorecard_calculate, scorecard_post_param, context: UserInformation):
if not os.path.exists(os.path.dirname(mat_file)):
result = 'not a valid file path, probably just a name'
file_path = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files/',context.edlcontext.current_mission, mat_file)
mat_file = file_path
else:
mat_file = mat_file
'''Get from the template the details of the metric being calculated'''
with open('/Users/ssantini/Code/Code_Daphne/daphne_brain/EDL/data/scorecard_materials/scorecard.json') as file:
scorecard_json = json.load(file)
for item in scorecard_json:
if item['metric'] == scorecard_post_param:
units = item['units']
type_result = item['type']
eval_string = item['evalString']
eng1.addpath('/Volumes/Encrypted/Mars2020/mars2020/MATLAB/', nargout=0)
eng1.addpath('/Users/ssantini/Code/ExtractDataMatlab/MatlabEngine/', nargout=0)
list_for_load, warning = CalculateFuncs.equation_parser(edl_scorecard_calculate, mat_file)
''' Equations to calculate and remove the things left to the equal side '''
eqs_to_calc = edl_scorecard_calculate.split(';')
''' Load variables into workspace'''
[eng1.load(mat_file, item, nargout=0) for item in list_for_load] # load each
[eng1.workspace[item] for item in list_for_load] # add each to workspace
for item in eqs_to_calc:
eng1.eval(item, nargout=0)
calculation_result = eng1.workspace['ans']
calculation_string = eng1.eval(eval_string)
calculation_response = "".join([str('The'), str(' '), str(scorecard_post_param), str(' = '), str(calculation_string),
str(units), str(" ("), str(type_result),str(") ")])
return calculation_response
# TODO: add function for plotting metrics (5010)
def plot_from_matfile(mat_file, param_name1, param_name2, context: UserInformation):
eng1.addpath('/Volumes/Encrypted/Mars2020/mars2020/MATLAB/', nargout=0)
eng1.addpath('/Users/ssantini/Code/ExtractDataMatlab/MatlabEngine/', nargout=0)
# param_name1 = 'windvert'
# param_name2 = 'peak inflation axial load'
file_to_search = os.path.basename(mat_file.replace(".mat", ".yml"))
scorecard_query = EDLContextScorecards.objects.filter(scorecard_name__exact=file_to_search)
if scorecard_query.count() > 0:
scorecard = scorecard_query.first()
complete_scorecard = pickle.loads(scorecard.current_scorecard_df)
out_of_spec_df = pickle.loads(scorecard.current_scorecard_df_fail)
''' Check if it is a matin file'''
scorecard_metrics = (complete_scorecard['metric_name']).tolist()
scorecard_metrics = [x.lower() for x in scorecard_metrics if x is not None]
list_metrics = [i[0] for i in scipy.io.whosmat(mat_file)]
if param_name1 not in list_metrics and param_name1 not in scorecard_metrics:
mat_file1 = os.path.basename(mat_file.replace(".mat", "_matin.mat"))
mat_file1 = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files_Inputs/m2020/', mat_file1)
else:
mat_file1 = mat_file
if param_name2 not in list_metrics and param_name2 not in scorecard_metrics:
mat_file2 = os.path.basename(mat_file.replace(".mat", "_matin.mat"))
mat_file2 = os.path.join('/Users/ssantini/Code/EDL_Simulation_Files_Inputs/m2020/', mat_file2)
else:
mat_file2 = mat_file
variable_loc1, arr1, outofspec_indices1, fail_case_no1, flag_indices1, flag_case_no1 = get_variable_info.locate_variable(param_name1, complete_scorecard,
out_of_spec_df, mat_file1, 'all cases')
variable_loc2, arr2, outofspec_indices2, fail_case_no2, flag_indices2, flag_case_no2 = get_variable_info.locate_variable(param_name2,
complete_scorecard,
out_of_spec_df, mat_file2, 'all cases')
eng1.load(mat_file, 'output_case', nargout=0)
output_case = eng1.workspace['output_case']
output_case_list = (np.asarray(output_case)).tolist()
output_case_list = [item for sublist in output_case_list for item in sublist]
output_case_list = [str(i) for i in output_case_list]# flatten
val1 = [item for
sublist in arr1.tolist() for item in sublist]
val1 = matlab.double(val1)
val2 = [item for sublist in arr2.tolist() for item in sublist]
val2 = matlab.double(val2)
fail_cases_total = fail_case_no1.tolist() + fail_case_no2.tolist()
fail_cases_list = [str(i) for i in fail_cases_total]
fail_case_labels = [True if i in fail_cases_list else False for i in output_case_list]
flag_cases_total = flag_case_no1.tolist() + flag_case_no2.tolist()
flag_cases_list = [str(i) for i in flag_cases_total]
flag_case_labels = [True if i in flag_cases_list else False for i in output_case_list]
my_list = []
for _ in range(val2.size[1]):
if output_case is not None:
my_list.append((val1._data[_], val2._data[_], output_case._data[_], fail_case_labels[_], flag_case_labels[_]))
else:
my_list.append((val1._data[_], val2._data[_], None))
return my_list
def create_cormat(matout_path, context: UserInformation):
''' Start Engine
    1. Get list of variables in dataset
2. Set desired events for search
3. Remove irrelevant events
4. Create dataframe of remaining variables and compute the correlation matrix
'''
prueba = matout_path
eng1.load(matout_path, nargout = 0)
eng1.load(matout_path, 'output_case', nargout=0)
output_cases = [item for sublist in np.array(eng1.workspace['output_case']).tolist() for item in sublist] # will be used as dataframe indices
list_metrics_matout = scipy.io.whosmat(matout_path)
list_metrics_matout_clean = [i[0] for i in list_metrics_matout]
list_events = [words for segments in list_metrics_matout_clean for words in segments.split('_')[-1:]]
list_events = (list(set(list_events)))
# We had roughly 806 events. For an initial approach, we selected the events below. Which result in ~2,500 variables
sub_events = ['_dsi', 'fesn', 'AGLsample', '_ei', '_rc', '_rev1', '_end1', '_hda', '_sufr', '_pd', '_hs', '_bs', '_sky', '_td']
list_metrics_arm = []
for substring in sub_events:
list_variables = [s for s in list_metrics_matout_clean if substring in s]
list_metrics_arm.append(list_variables)
list_metrics_arm = [item for sublist in list_metrics_arm for item in sublist]
''' Get data from matout.mat and save as dataframe'''
my_list = []
for i in range(len(list_metrics_arm)):
arr = eng1.eval(list_metrics_arm[i])
my_list.append(np.asarray(arr._data))
matout_df = pd.DataFrame(my_list, index=list_metrics_arm, columns=output_cases).transpose()
    matout_df = matout_df.astype('float32')
matin_df_sorted = matout_df.reset_index()
num_partitions = 16 # no. partitions to split dataframe
num_cores = 8
corr_matrix = correlation_multiprocessing.parallelize_dataframe(matin_df_sorted, correlation_multiprocessing.vcorrcoef, list_metrics_arm)
    cormat_df = pd.DataFrame(corr_matrix, index=list_metrics_arm, columns=list_metrics_arm)
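    # e.g. the strongest absolute correlates of one metric can then be inspected with something like
    # cormat_df['some_metric_fesn'].abs().sort_values(ascending=False).head(10)
    # ('some_metric_fesn' is a placeholder column name, not a real variable from the mat file)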
# Dec 21: modified to optionally output the original (pre-imputation) pattern counts
##
#---------------------------------------------------------------------
# SERVER only: input all files (.bam and .fa), output MeH matrix in .csv
# Oct 19, 2021 ML after imputation test
# github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import os
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
    print(log_message)
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
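# Usage sketch (the log path is a placeholder):
# open_log('MeHdata/MeH.log'); logm('screening started'); close_log()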
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=3
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
#return temp.sum()>=2**(w-2) and tempw1.sum()>0
return temp.sum()>=2 and tempw1.sum()>0
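# For intuition (toy example, not part of the pipeline): with w=4, a window such as
# np.array([[1,0,1,1],[1,0,1,1],[0,0,1,1],[1,np.nan,1,1]]) has 3 complete reads, so
# enough_reads(window,4,complete=True) is True, and the single read with exactly one
# missing site also makes it eligible for imputation (complete=False).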
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
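# For the toy window above, impute(window,4) fills the missing site of the last read: every complete
# read carries 0 in that column, so the np.nan is replaced by 0; otherwise a complete read agreeing
# on the other w-1 sites (or, failing that, a randomly sampled observed value) supplies the fill.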
def outwindow(pat,patori,pos,chrom,w,M,UM,Mo,UMo,mC=4,strand='f',optional=False):
# get complete reads
tempori=np.isnan(patori).sum(axis=1)==0
patori=patori[np.where(tempori)[0],:]
countori=np.zeros((2**w,1))
temp=np.isnan(pat).sum(axis=1)==0
pat=pat[np.where(temp)[0],:]
count=np.zeros((2**w,1))
# m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if optional:
patori = Counter([str(i[0])+str(i[1]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in patori.astype(int).tolist()])
countori = np.array([float(patori[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in patori.astype(int).tolist()])
countori = np.array([float(patori[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
countori=countori.reshape(2**w)
countori=np.concatenate((countori[[0]],countori))
if w==3 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==3 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==4 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==4 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==5 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==5 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'p17o':countori[17],'p18o':countori[18],'p19o':countori[19],'p20o':countori[20],\
'p21o':countori[21],'p22o':countori[22],'p23o':countori[23],'p24o':countori[24],'p25o':countori[25],\
'p26o':countori[26],'p27o':countori[27],'p28o':countori[28],'p29o':countori[29],'p30o':countori[30],\
'p31o':countori[31],'p32o':countori[32],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==6 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'M':M,'UM':UM,\
'strand':strand}, index=[0])
if w==6 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'p01o':countori[1],'p02o':countori[2],\
'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'p17o':countori[17],'p18o':countori[18],'p19o':countori[19],'p20o':countori[20],\
'p21o':countori[21],'p22o':countori[22],'p23o':countori[23],'p24o':countori[24],'p25o':countori[25],\
'p26o':countori[26],'p27o':countori[27],'p28o':countori[28],'p29o':countori[29],'p30o':countori[30],\
'p31o':countori[31],'p32o':countori[32],'p33o':countori[33],'p34o':countori[34],\
'p35o':countori[35],'p36o':countori[36],'p37o':countori[37],'p38o':countori[38],'p39o':countori[39],'p40o':countori[40],\
'p41o':countori[41],'p42o':countori[42],'p43o':countori[43],'p44o':countori[44],'p45o':countori[45],\
'p46o':countori[46],'p47o':countori[47],'p48o':countori[48],'p49o':countori[49],'p50o':countori[50],\
'p51o':countori[51],'p52o':countori[52],'p53o':countori[53],'p54o':countori[54],'p55o':countori[55],\
'p56o':countori[56],'p57o':countori[57],'p58o':countori[58],'p59o':countori[59],'p60o':countori[60],\
'p61o':countori[61],'p62o':countori[62],'p63o':countori[63],'p64o':countori[64],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,\
'strand':strand}, index=[0])
return opt
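# Note on the columns returned by outwindow: for w=3 the pattern counts p01..p08 follow the order
# 000,100,010,110,001,101,011,111 (e.g. reads with pattern 101 are tallied in p06); M/UM are the
# methylated/unmethylated depths at the first CpG of the window, and the *o columns (optional=True)
# hold the corresponding counts before imputation.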
def CGgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata'):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if w==3 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','M','UM','strand'])
if w==4 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','M','UM','strand'])
if w==5 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
        'M','UM','strand'])
if w==6 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','M','UM','strand'])
if w==7 and not optional:
ResultPW = pd.DataFrame(columns=\
['chrom','pos','M','UM','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'p65','p66','p67','p68','p69','p70','p71','p72','p73','p74','p75','p76','p77','p78','p79','p80','p81','p82','p83','p84','p85','p86'\
,'p87','p88','p89','p90','p91','p92','p93','p94','p95','p96','p97','p98','p99','p100','p101','p102','p103','p104'\
,'p105','p106','p107','p108','p109','p120','p121','p122','p123','p124','p125','p126','p127','p128','strand'])
if w==3 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','M','UM','Mo','UMo','strand'])
if w==4 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','M','UM','Mo','UMo','strand'])
if w==5 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'M','UM','Mo','UMo','strand'])
if w==6 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'p33o','p34o','p35o','p36o','p37o','p38o','p39o','p40o','p41o','p42o','p43o','p44o','p45o','p46o',\
'p47o','p48o','p49o','p50o','p51o','p52o','p53o','p54o','p55o','p56o','p57o','p58o','p59o','p60o',\
'p61o','p62o','p63o','p64o','M','UM','Mo','UMo','strand'])
neverr = never = True
chrom_list = []
    # collect the chromosome names present in this bam file's index
for i in samfile.get_index_statistics():
chrom_list.append(i.contig)
if chrom in chrom_list:
# screen bamfile by column
for pileupcolumn in samfile.pileup(chrom):
coverage += 1
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
methtemp = meth.copy()
# imputation by sliding windows of w C by 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
# meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
# total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
# for i in range(0,meth.shape[1]-w+1,1):
# if i<w:
for i in range(0,w,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
windowold = meth.iloc[:,range(i,i+w)].values
window = methtemp.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"%s/CG_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
return filename, coverage, cov_context, 'CG'
print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
#samfile.close()
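# Usage sketch (a hypothetical driver; the real pipeline may dispatch these calls differently, e.g. over a
# multiprocessing pool; 'sample1.bam' and 'genome' are placeholders for files under the MeHdata folder):
# for c in ['chr1', 'chr2']:
#     CGgenome_scr('sample1.bam', chrom=c, w=4, fa='genome', mC=4, folder='MeHdata')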
def CHHgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata',minML=0.05):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if w==3 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','M','UM','strand'])
if w==4 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','M','UM','strand'])
if w==5 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
        'M','UM','strand'])
if w==6 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','M','UM','strand'])
if w==7 and not optional:
ResultPW = pd.DataFrame(columns=\
['chrom','pos','M','UM','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'p65','p66','p67','p68','p69','p70','p71','p72','p73','p74','p75','p76','p77','p78','p79','p80','p81','p82','p83','p84','p85','p86'\
,'p87','p88','p89','p90','p91','p92','p93','p94','p95','p96','p97','p98','p99','p100','p101','p102','p103','p104'\
,'p105','p106','p107','p108','p109','p120','p121','p122','p123','p124','p125','p126','p127','p128','strand'])
if w==3 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','M','UM','Mo','UMo','strand'])
if w==4 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','M','UM','Mo','UMo','strand'])
if w==5 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'M','UM','Mo','UMo','strand'])
if w==6 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'p33o','p34o','p35o','p36o','p37o','p38o','p39o','p40o','p41o','p42o','p43o','p44o','p45o','p46o',\
'p47o','p48o','p49o','p50o','p51o','p52o','p53o','p54o','p55o','p56o','p57o','p58o','p59o','p60o',\
'p61o','p62o','p63o','p64o','M','UM','Mo','UMo','strand'])
neverr = never = True
if samfile.is_valid_reference_name(chrom):
for pileupcolumn in samfile.pileup(chrom):
coverage += 1
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
# check if enough complete patterns for evaluating MeH
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='f',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CHH_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = methtemp.iloc[:,range(i,i+w)].values
windowold = meth.iloc[:,range(i,i+w)].values
M=(window==1).sum(axis=0)[0]
UM=(window==0).sum(axis=0)[0]
Mo=(windowold==1).sum(axis=0)[0]
UMo=(windowold==0).sum(axis=0)[0]
depth=M+UM
if depth>=mC:
if M/depth > minML:
toappend=outwindow(window,patori=windowold,w=w,pos=meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,strand='r',mC=mC,M=M,UM=UM,Mo=Mo,UMo=UMo,optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"%s/CHH_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"%s/CHH_%s_%s.csv"%(folder,filename,chrom),index = False, header=True)
print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata',minML=0.05):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = | pd.DataFrame(columns=['Qname']) | pandas.DataFrame |
import pandas
import scipy.interpolate
import numpy as np
from ..j_utils.string import str2time, time2str
from ..j_utils.path import format_filepath
from collections import OrderedDict
class History:
"""
Store data series by iteration and epoch.
Data are indexed by timestamp: the number of iterations since the first iteration of the first epoch.
"""
def __init__(self):
self._timeline_series = OrderedDict()
self._timestamps = pandas.DataFrame(columns=['date', 'time'])
self._events = []
self._nb_iterations_by_epoch = [0]
self._current_epoch = 1
self._current_epoch_iteration = -1
def save(self, path):
path = format_filepath(path)
df = self.export_dataframe()
def load(self, path):
path = format_filepath(path)
# --- Current Iteration ---
@property
def epoch(self):
return self._current_epoch
@property
def iteration(self):
return self._current_epoch_iteration
@property
def last_timeid(self):
return sum(self._nb_iterations_by_epoch)
def __len__(self):
return self.last_timeid + 1
def next_iteration(self, time, date=None):
self._current_epoch_iteration += 1
self._nb_iterations_by_epoch[-1] = self._current_epoch_iteration
self._update_timestamp(time, date)
def next_epoch(self, time, date=None):
self._current_epoch += 1
self._current_epoch_iteration = 0
self._nb_iterations_by_epoch.append(0)
self._update_timestamp(time, date)
def _update_timestamp(self, time, date):
if date is None:
date = pandas.Timestamp.now()
date = pandas.to_datetime(date)
df = pandas.DataFrame([[time, date]], index=[self.last_timeid], columns=['time', 'date'])
self._timestamps = self._timestamps.append(df)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise KeyError('History key should be a series name, not (%s, type: %s).'
% (str(key), type(key)))
if key not in self._timeline_series:
serie = pandas.Series(data=[value], index=[self.last_timeid], name=key)
self._timeline_series[key] = serie
else:
self._timeline_series[key][self.last_timeid] = value
# --- Store/Read Data ---
def keys(self):
return self._timeline_series.keys()
def series(self, only_number=False):
keys = list(self.keys())
if only_number:
return [k for k in keys if self._timeline_series[k].dtype != 'O']
return keys
def __getitem__(self, item):
if isinstance(item, str):
if item not in self.keys():
raise KeyError('%s is an unknown series name.' % item)
return self._timeline_series[item].iloc[-1]
elif isinstance(item, tuple):
if len(item) != 2 or item[0] not in self.keys():
raise KeyError("Invalid history index: %s\n"
"Index should follow the form: ['series name', time_index]" % repr(item))
series = item[0]
timeid = item[1]
if isinstance(timeid, slice):
df = self.read(series=series, start=timeid.start, stop=timeid.stop, step=timeid.step,
interpolation='previous', averaged=True, std=False)
return df[series].values
else:
return self.get(series=series, timeid=timeid, interpolation='previous')
raise IndexError('Invalid index: unable to read from history series')
def get(self, series, timeid=-1, interpolation='previous', default='raise exception'):
try:
t = self.interpret_timeid(timeid)
if series not in self.keys():
raise KeyError('%s is an unknown series name.' % series)
except LookupError as e:
if default != 'raise exception':
return default
raise e from None
serie = self._timeline_series[series]
if interpolation is None:
try:
return serie.loc[t]
except KeyError:
if default != 'raise exception':
return default
raise IndexError("Series %s doesn't store any data at time: %s.\n"
"The interpolation parameter may be used to avoid this exception."
% (series, repr(timeid)))
else:
serie = scipy.interpolate.interp1d(x=serie.index, y=serie.values,
kind=interpolation, fill_value='extrapolate',
assume_sorted=True, copy=False)
return serie(timeid)
def read(self, series=None, start=0, stop=0, step=1, timestamp=None,
interpolation='previous', smooth=None, averaged=True, std=False):
"""
Read time series between two timestamps, optionally averaging, interpolating and smoothing them.
:param series: Keys of the variables to read
:type series: str or tuple or set
:param start: timestamp from which data should be read
:type start: int, TimeStamp, ...
:param stop: timestamp until which data should be read
:type stop: int, TimeStamp, ...
:param step: Interval between two consecutive samples
:type step: int, TimeStamp, ...
:param timestamp: Additional timestamp related columns. Acceptable values are:
- epoch
- iteration
- time
- date
:param interpolation: Specify which number serie should be interpolated and how.
NaN in number series can automatically be replaced by interpolated values using pandas interpolation algorithms.
This parameter must be one of the following:
- True: All numbers series are interpolated linearly
- False: No interpolation is applied (NaN are not replaced)
- List of series name: The specified series are interpolated linearly
- Dictionary associating an interpolation method to a series name.
:param smooth: Specify which number series should be smoothed and how much.
Specified series are smoothed with a Savitzky-Golay filter of order 3. The window size may be chosen (default is 15).
:param averaged: Names of the time series whose values should be averaged along each step
instead of being naively down-sampled. Can only be applied on number series.
True means that all number series are averaged and False means that none are.
:param std: Names of the averaged time series whose standard deviation should be computed.
A new column is created for each of these time series with the name 'STD columnName'.
:return: time series
:rtype: pandas.DataFrame
"""
if stop is None:
stop = len(self)
indexes = np.array(list(self.timeid_iterator(start=start, stop=stop, step=step)), dtype=np.uint32)
intervals = np.stack((indexes, np.concatenate((indexes[1:], [stop]))), axis=1)
series_name = self.interpret_series_name(series)
if isinstance(averaged, bool):
averaged = self.series(only_number=True) if averaged else []
else:
averaged = self.interpret_series_name(averaged, only_number=True)
if isinstance(std, bool):
std = averaged if std else []
else:
if isinstance(std, str):
std = [std]
not_averaged_series = set(std).difference(averaged)
if not_averaged_series:
raise ValueError("Can't compute standard deviation of: %s.\n"
"Those series are not averaged." % repr(not_averaged_series))
if not interpolation:
interpolation = {}
elif isinstance(interpolation, bool):
interpolation = {_: 'linear' for _ in self.series(only_number=True)}
elif isinstance(interpolation, str):
if interpolation in self.series(only_number=True):
interpolation = {interpolation: 'linear'}
else:
interpolation = {_: interpolation for _ in self.series(only_number=True)}
elif isinstance(interpolation, (dict, OrderedDict)):
unknown_keys = set(interpolation.keys()).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't interpolate series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
else:
unknown_keys = set(interpolation).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't interpolate series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
interpolation = {_: 'linear' for _ in interpolation}
if not smooth:
smooth = {}
elif isinstance(smooth, bool):
smooth = {_: 15 for _ in self.series(only_number=True)}
elif isinstance(smooth, str):
if smooth not in self.series(only_number=True):
raise ValueError("Can't smooth series %s. It is either unknown or doesn't contain number!"
% smooth)
smooth = {smooth: 15}
elif isinstance(smooth, (dict, OrderedDict)):
unknown_keys = set(smooth.keys()).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't smooth series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
else:
unknown_keys = set(smooth).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't smooth series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
smooth = {_: 15 for _ in smooth}
if smooth:
import scipy.signal
df = []
for k in series_name:
series = self._timeline_series[k]
std_series = None
# Sample
if k in self.series(only_number=True):
if k not in averaged:
series = series.reindex(indexes, copy=False)
else:
mean_series = np.zeros(shape=(intervals.shape[0],))
std_series = np.zeros(shape=(intervals.shape[0],)) if k in std else None
for i, (start_id, end_id) in enumerate(intervals):
s = series.loc[start_id:end_id-1]
mean_series[i] = np.nanmean(s) if len(s) else np.nan
if std_series is not None:
std_series[i] = np.nanvar(s) if len(s) else np.nan
series = pandas.Series(index=indexes, data=mean_series, name=series.name)
if std_series is not None:
std_series = pandas.Series(index=indexes, data=std_series, name='STD '+series.name)
# Interpolate
if k in interpolation:
if interpolation[k] == 'previous':
series.fillna(method='pad', inplace=True)
if std_series is not None:
std_series.fillna(method='pad', inplace=True)
else:
series.interpolate(method=interpolation[k], inplace=True)
if std_series is not None:
std_series.interpolate(method=interpolation[k], inplace=True)
# Smooth
if k in smooth:
s = series.values
s = scipy.signal.savgol_filter(s, smooth[k], 3, mode='constant')
series = pandas.Series(index=indexes, data=s, dtype=series.dtype, name=series.name)
else:
series = series.reindex(indexes, copy=False, method='pad')
# Store
df.append(series)
if std_series is not None:
df.append(std_series)
if timestamp:
df = self.timestamp_dataframe(timestamp, indexes, series_list=True) + df
return pandas.DataFrame(df).transpose()
# --- Export ---
def export_dataframe(self, series=None, start=0, stop=0, timestamp=None):
"""
Export time series as a pandas DataFrame
:param series: Name of the series to export. None means all series.
:param start: Minimum time index of exported data
:param stop: Maximum time index of exported data
:param timestamp: Additional exported columns. Acceptable values are:
- epoch
- iteration
- time
- date
:rtype: pandas.DataFrame
"""
start = self.interpret_timeid(start)
stop = self.interpret_timeid(stop, stop_index=True)
series_name = self.interpret_series_name(series)
series = []
for k in series_name:
serie = self._timeline_series[k].loc[start:stop]
series.append(serie)
df = pandas.DataFrame(series).transpose()
if timestamp:
timestamp_df = self.timestamp_dataframe(timestamp, df.index)
df = pandas.concat([timestamp_df, df], axis=1)
return df
def export_csv(self, path, series=None, start=0, stop=0, timestamp=('epoch', 'iteration')):
df = self.export_dataframe(series=series, start=start, stop=stop, timestamp=timestamp)
df.to_csv(path_or_buf=path)
def export_CURView(self, path, series=None, start=0, stop=0):
def minibatch_count(e):
return self._nb_iterations_by_epoch[e-1]
df = self.export_dataframe(series=series, start=start, stop=stop, timestamp=['epoch', 'iteration'])
mini_count = df['epoch'].map(minibatch_count)
mini_count.name = 'number_of_minibatches'
df = | pandas.concat((df, mini_count), axis=1, copy=False) | pandas.concat |
import logging
import os
import time
import warnings
from datetime import date, datetime, timedelta
from io import StringIO
from typing import Dict, Iterable, List, Optional, Union
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
import tables
from pvoutput.consts import (
BASE_URL,
CONFIG_FILENAME,
ONE_DAY,
PV_OUTPUT_DATE_FORMAT,
RATE_LIMIT_PARAMS_TO_API_HEADERS,
)
from pvoutput.daterange import DateRange, merge_date_ranges_to_years
from pvoutput.exceptions import NoStatusFound, RateLimitExceeded
from pvoutput.utils import (
_get_param_from_config_file,
_get_response,
_print_and_log,
get_date_ranges_to_download,
sort_and_de_dupe_pv_system,
system_id_to_hdf_key,
)
_LOG = logging.getLogger("pvoutput")
class PVOutput:
"""
Attributes:
api_key
system_id
rate_limit_remaining
rate_limit_total
rate_limit_reset_time
data_service_url
"""
def __init__(
self,
api_key: str = None,
system_id: str = None,
config_filename: Optional[str] = CONFIG_FILENAME,
data_service_url: Optional[str] = None,
):
"""
Args:
api_key: Your API key from PVOutput.org.
system_id: Your system ID from PVOutput.org. If you don't have a
PV system then you can register with PVOutput.org and select
the 'energy consumption only' box.
config_filename: Optional, the filename of the .yml config file.
data_service_url: Optional. If you have subscribed to
PVOutput.org's data service then add the data service URL here.
This string must end in '.org'.
"""
self.api_key = api_key
self.system_id = system_id
self.rate_limit_remaining = None
self.rate_limit_total = None
self.rate_limit_reset_time = None
self.data_service_url = data_service_url
# Set from config file if None
for param_name in ["api_key", "system_id"]:
if getattr(self, param_name) is None:
try:
param_value_from_config = _get_param_from_config_file(
param_name, config_filename
)
except Exception as e:
msg = (
"Error loading configuration parameter {param_name}"
" from config file {filename}. Either pass"
" {param_name} into PVOutput constructor, or create"
" config file {filename}. {exception}".format(
param_name=param_name, filename=CONFIG_FILENAME, exception=e
)
)
print(msg)
_LOG.exception(msg)
raise
setattr(self, param_name, param_value_from_config)
# Convert to strings
setattr(self, param_name, str(getattr(self, param_name)))
# Check for data_service_url
if self.data_service_url is None:
try:
self.data_service_url = _get_param_from_config_file(
"data_service_url", config_filename
)
except KeyError:
pass
except FileNotFoundError:
pass
if self.data_service_url is not None:
if not self.data_service_url.strip("/").endswith(".org"):
raise ValueError("data_service_url must end in '.org'")
def search(
self,
query: str,
lat: Optional[float] = None,
lon: Optional[float] = None,
include_country: bool = True,
**kwargs
) -> pd.DataFrame:
"""Search for PV systems.
Some quirks of the PVOutput.org API:
- The maximum number of results returned by PVOutput.org is 30.
If the number of returned results is 30, then there is no
indication of whether there are exactly 30 search results,
or if there are more than 30. Also, there is no way to
request additional 'pages' of search results.
- The maximum search radius is 25km
Args:
query: string, see https://pvoutput.org/help.html#search
e.g. '5km'.
lat: float, e.g. 52.0668589
lon: float, e.g. -1.3484038
include_country: bool, whether or not to include the country name
with the returned postcode.
Returns:
pd.DataFrame, one row per search results. Index is PV system ID.
Columns:
name,
system_DC_capacity_W,
address, # If `include_country` is True then address is
# '<country> <postcode>',
# else address is '<postcode>'.
orientation,
num_outputs,
last_output,
panel,
inverter,
distance_km,
latitude,
longitude
"""
api_params = {"q": query, "country": int(include_country)}
if lat is not None and lon is not None:
api_params["ll"] = "{:f},{:f}".format(lat, lon)
pv_systems_text = self._api_query(service="search", api_params=api_params, **kwargs)
pv_systems = pd.read_csv(
StringIO(pv_systems_text),
names=[
"name",
"system_DC_capacity_W",
"address",
"orientation",
"num_outputs",
"last_output",
"system_id",
"panel",
"inverter",
"distance_km",
"latitude",
"longitude",
],
index_col="system_id",
)
return pv_systems
def get_status(
self, pv_system_id: int, date: Union[str, datetime], historic: bool = True, **kwargs
) -> pd.DataFrame:
"""Get PV system status (e.g. power generation) for one day.
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Args:
pv_system_id: int
date: str in format YYYYMMDD; or datetime
(localtime of the PV system)
Returns:
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
energy_efficiency_kWh_per_kW,
instantaneous_power_gen_W,
average_power_gen_W,
power_gen_normalised,
energy_consumption_Wh,
power_demand_W,
temperature_C,
voltage
"""
_LOG.info("system_id %d: Requesting system status for %s", pv_system_id, date)
date = date_to_pvoutput_str(date)
_check_date(date)
api_params = {
"d": date, # date, YYYYMMDD, localtime of the PV system
"h": int(historic == True), # We want historical data.
"limit": 288, # API limit is 288 (num of 5-min periods per day).
"ext": 0, # Extended data; we don't want extended data.
"sid1": pv_system_id, # SystemID.
}
try:
pv_system_status_text = self._api_query(
service="getstatus", api_params=api_params, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date %s", pv_system_id, date)
pv_system_status_text = ""
# See https://pvoutput.org/help.html#api-getstatus but make sure
# you read the 'History Query' subsection, as a historical query
# has slightly different return columns compared to a non-historical
# query!
columns = (
[
"cumulative_energy_gen_Wh",
"energy_efficiency_kWh_per_kW",
"instantaneous_power_gen_W",
"average_power_gen_W",
"power_gen_normalised",
"energy_consumption_Wh",
"power_demand_W",
"temperature_C",
"voltage",
]
if historic
else [
"cumulative_energy_gen_Wh",
"instantaneous_power_gen_W",
"energy_consumption_Wh",
"power_demand_W",
"power_gen_normalised",
"temperature_C",
"voltage",
]
)
pv_system_status = pd.read_csv(
StringIO(pv_system_status_text),
lineterminator=";",
names=["date", "time"] + columns,
parse_dates={"datetime": ["date", "time"]},
index_col=["datetime"],
dtype={col: np.float64 for col in columns},
).sort_index()
return pv_system_status
def get_batch_status(
self,
pv_system_id: int,
date_to: Optional[Union[str, datetime]] = None,
max_retries: Optional[int] = 1000,
**kwargs
) -> Union[None, pd.DataFrame]:
"""Get batch PV system status (e.g. power generation).
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Data returned is limited to the last 366 days per request.
To retrieve older data, use the date_to parameter.
The PVOutput getbatchstatus API is asynchronous. When it's first
called, it replies to say 'accepted'. This function will then
wait a minute and call the API again to see if the data is ready.
Set `max_retries` to 1 if you want to return immediately, even
if data isn't ready yet (and hence this function will return None)
https://pvoutput.org/help.html#dataservice-getbatchstatus
Args:
pv_system_id: int
date_to: str in format YYYYMMDD; or datetime
(localtime of the PV system). The returned timeseries will
include 366 days of data: from one year before date_to up to date_to, inclusive.
max_retries: int, number of times to retry after receiving
a '202 Accepted' request. Set `max_retries` to 1 if you want
to return immediately, even if data isn't ready yet (and hence
this function will return None).
Returns:
None (if data isn't ready after retrying max_retries times) or
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
instantaneous_power_gen_W,
temperature_C,
voltage
"""
api_params = {"sid1": pv_system_id}
_set_date_param(date_to, api_params, "dt")
for retry in range(max_retries):
try:
pv_system_status_text = self._api_query(
service="getbatchstatus", api_params=api_params, use_data_service=True, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date_to %s", pv_system_id, date_to)
pv_system_status_text = ""
break
if "Accepted 202" in pv_system_status_text:
if retry == 0:
_print_and_log("Request accepted.")
if retry < max_retries - 1:
_print_and_log("Sleeping for 1 minute.")
time.sleep(60)
else:
_print_and_log(
"Call get_batch_status again in a minute to see if" " results are ready."
)
else:
break
else:
return
return _process_batch_status(pv_system_status_text)
def get_metadata(self, pv_system_id: int, **kwargs) -> pd.Series:
"""Get metadata for a single PV system.
Args:
pv_system_id: int
Returns:
pd.Series. Index is:
name,
system_DC_capacity_W,
address,
num_panels,
panel_capacity_W_each,
panel_brand,
num_inverters,
inverter_capacity_W,
inverter_brand,
orientation,
array_tilt_degrees,
shade,
install_date,
latitude,
longitude,
status_interval_minutes,
secondary_num_panels,
secondary_panel_capacity_W_each,
secondary_orientation,
secondary_array_tilt_degrees
"""
pv_metadata_text = self._api_query(
service="getsystem",
api_params={
"array2": 1, # Provide data about secondary array, if present.
"tariffs": 0,
"teams": 0,
"est": 0,
"donations": 0,
"sid1": pv_system_id, # SystemID
"ext": 0, # Include extended data?
},
**kwargs
)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
lineterminator=";",
names=[
"name",
"system_DC_capacity_W",
"address",
"num_panels",
"panel_capacity_W_each",
"panel_brand",
"num_inverters",
"inverter_capacity_W",
"inverter_brand",
"orientation",
"array_tilt_degrees",
"shade",
"install_date",
"latitude",
"longitude",
"status_interval_minutes",
"secondary_num_panels",
"secondary_panel_capacity_W_each",
"secondary_orientation",
"secondary_array_tilt_degrees",
],
parse_dates=["install_date"],
nrows=1,
).squeeze()
pv_metadata["system_id"] = pv_system_id
pv_metadata.name = pv_system_id
return pv_metadata
def get_statistic(
self,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.DataFrame:
"""Get summary stats for a single PV system.
Args:
pv_system_id: int
date_from
date_to
Returns:
pd.DataFrame:
total_energy_gen_Wh,
energy_exported_Wh,
average_daily_energy_gen_Wh,
minimum_daily_energy_gen_Wh,
maximum_daily_energy_gen_Wh,
average_efficiency_kWh_per_kW,
num_outputs, # The number of days for which there's >= 1 val.
actual_date_from,
actual_date_to,
record_efficiency_kWh_per_kW,
record_efficiency_date,
query_date_from,
query_date_to
"""
if date_from and not date_to:
date_to = pd.Timestamp.now().date()
if date_to and not date_from:
date_from = pd.Timestamp("1900-01-01").date()
api_params = {
"c": 0, # consumption and import
"crdr": 0, # credits / debits
"sid1": pv_system_id, # SystemID
}
_set_date_param(date_from, api_params, "df")
_set_date_param(date_to, api_params, "dt")
try:
pv_metadata_text = self._api_query(
service="getstatistic", api_params=api_params, **kwargs
)
except NoStatusFound:
pv_metadata_text = ""
columns = [
"total_energy_gen_Wh",
"energy_exported_Wh",
"average_daily_energy_gen_Wh",
"minimum_daily_energy_gen_Wh",
"maximum_daily_energy_gen_Wh",
"average_efficiency_kWh_per_kW",
"num_outputs",
"actual_date_from",
"actual_date_to",
"record_efficiency_kWh_per_kW",
"record_efficiency_date",
]
date_cols = ["actual_date_from", "actual_date_to", "record_efficiency_date"]
numeric_cols = set(columns) - set(date_cols)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
names=columns,
dtype={col: np.float32 for col in numeric_cols},
parse_dates=date_cols,
)
if pv_metadata.empty:
data = {col: np.float32(np.NaN) for col in numeric_cols}
data.update({col: pd.NaT for col in date_cols})
pv_metadata = pd.DataFrame(data, index=[pv_system_id])
else:
pv_metadata.index = [pv_system_id]
pv_metadata["query_date_from"] = pd.Timestamp(date_from) if date_from else pd.NaT
pv_metadata["query_date_to"] = pd.Timestamp(date_to) if date_to else | pd.Timestamp.now() | pandas.Timestamp.now |
# Import the required libraries
import streamlit as st
import pandas as pd
import numpy as np
import quandl as q
import base64
import plotly.express as px
from graf import plot, plotC
from datetime import date, datetime
# Lists for the tables and the dataframes
API = ['CEPEA/CALF','CEPEA/CALF_C','CEPEA/CATTLE','CEPEA/COTTON', 'CEPEA/EOR_DP','CEPEA/MILK',
'CEPEA/POR_DP','CEPEA/POR_NM','CEPEA/PORK','CEPEA/POULTRY_C', 'CEPEA/POULTRY_F','CEPEA/RICE',
'CEPEA/SOYBEAN','CEPEA/SOYBEAN_C','CEPEA/SUGAR','CEPEA/SUGAR_C',
'CEPEA/WHEAT_P','CEPEA/WHEAT_R']
Commoditys = ['CEPEA/CORN_C', 'CEPEA/COTTON_D', 'CEPEA/COFFEE_R', 'CEPEA/COFFEE_A', 'CEPEA/CORN']
Nome1 = ['Milho_C', 'Algodão_D', 'Café_R', 'Café_A', 'Milho']
Nome = ['Bezerro','Bezerro_C','Gado', 'Algodão', 'Laranja_Industrial','Leite',
'Laranja_Industria_Precoce','Laranja_in_Natura','Porco','Frango_Congelado'
,'Frango_Frio','Arroz','Soja','Soja_C','Açúcar','Açúcar_C', 'Trigo_P', 'Trigo_R']
# Data
@st.cache
def get_dados():
for (data_in_API, nome) in zip(API, Nome):
df = pd.DataFrame(q.get(data_in_API, authtoken="-zKjekRSoo3qjzzCVyUS", returns='numpy'), columns=['Date', 'Price US$'])
df.rename(columns={'Date':'Data', 'Price US$':'Preço US$'}, inplace=True)
df_name = nome
globals()[df_name] = | pd.DataFrame(df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = | Series(strs) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_search.ipynb (unless otherwise specified).
__all__ = ['compare_frags', 'ppm_to_dalton', 'get_idxs', 'compare_spectrum_parallel', 'query_data_to_features',
'get_psms', 'frag_delta', 'intensity_fraction', 'add_column', 'remove_column', 'get_hits', 'score',
'LOSS_DICT', 'LOSSES', 'get_sequences', 'get_score_columns', 'plot_psms', 'store_hdf', 'search_db',
'search_fasta_block', 'mass_dict', 'filter_top_n', 'ion_extractor', 'search_parallel']
# Cell
import logging
from numba import njit
import numpy as np
@njit
def compare_frags(query_frag: np.ndarray, db_frag: np.ndarray, frag_tol: float, ppm:bool=False) -> np.ndarray:
"""Compare query and database frags and find hits
Args:
query_frag (np.ndarray): Array with query fragments.
db_frag (np.ndarray): Array with database fragments.
frag_tol (float): Fragment tolerance for search.
ppm (bool, optional): Use ppm as unit or Dalton. Defaults to False.
Returns:
np.ndarray: Array with reported hits.
"""
q_max = len(query_frag)
d_max = len(db_frag)
hits = np.zeros(d_max, dtype=np.int16)
q, d = 0, 0 # q > query, d > database
while q < q_max and d < d_max:
mass1 = query_frag[q]
mass2 = db_frag[d]
delta_mass = mass1 - mass2
if ppm:
sum_mass = mass1 + mass2
mass_difference = 2 * delta_mass / sum_mass * 1e6
else:
mass_difference = delta_mass
if abs(mass_difference) <= frag_tol:
hits[d] = q + 1 # Save query position +1 (zero-indexing)
d += 1
q += 1 # Only one query for each db element
elif delta_mass < 0:
q += 1
elif delta_mass > 0:
d += 1
return hits
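# Worked example (illustrative): with a 0.5 Da tolerance only the second database
# fragment is matched, and hits stores the 1-based index of the matching query peak:
#
#   compare_frags(np.array([100.0, 200.2]), np.array([150.0, 200.0]), 0.5)
#   # -> array([0, 2], dtype=int16)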
# Cell
@njit
def ppm_to_dalton(mass:float, prec_tol:int)->float:
"""Function to convert ppm tolerances to Dalton.
Args:
mass (float): Base mass.
prec_tol (int): Tolerance.
Returns:
float: Tolerance in Dalton.
"""
return mass / 1e6 * prec_tol
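# Worked example: a 20 ppm tolerance around a 500 Da precursor corresponds to
# 500 / 1e6 * 20 = 0.01 Da.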
# Cell
def get_idxs(db_masses:np.ndarray, query_masses:np.ndarray, prec_tol:float, ppm:bool)-> (np.ndarray, np.ndarray):
"""Function to get upper and lower limits to define search range for a given precursor tolerance.
Args:
db_masses (np.ndarray): Array containing database masses.
query_masses (np.ndarray): Array containing query masses.
prec_tol (float): Precursor tolerance for search.
ppm: Flag to use ppm instead of Dalton.
Returns:
(np.ndarray, np.ndarray): Indices to lower and upper bounds.
"""
if ppm:
dalton_offset = ppm_to_dalton(query_masses, prec_tol)
else:
dalton_offset = prec_tol
idxs_lower = db_masses.searchsorted(query_masses - dalton_offset, side="left")
idxs_higher = db_masses.searchsorted(query_masses + dalton_offset, side="right")
return idxs_lower, idxs_higher
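# Illustrative example (ppm=True): with sorted db_masses = [100., 200., 300.],
# query_masses = [200.002] and prec_tol = 20 ppm the window is roughly +/- 0.004 Da,
# so idxs_lower = [1] and idxs_higher = [2], i.e. only database entry 1 is searched.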
# Cell
import alphapept.performance
@alphapept.performance.performance_function
def compare_spectrum_parallel(query_idx:int, query_masses:np.ndarray, idxs_lower:np.ndarray, idxs_higher:np.ndarray, query_indices:np.ndarray, query_frags:np.ndarray, query_ints:np.ndarray, db_indices:np.ndarray, db_frags:np.ndarray, best_hits:np.ndarray, score:np.ndarray, frag_tol:float, ppm:bool):
"""Compares a spectrum and writes to the best_hits and score.
Args:
query_idx (int): Integer to the query_spectrum that should be compared.
query_masses (np.ndarray): Array with query masses.
idxs_lower (np.ndarray): Array with indices for lower search boundary.
idxs_higher (np.ndarray): Array with indices for upper search boundary.
query_indices (np.ndarray): Array with indices to the query data.
query_frags (np.ndarray): Array with frag types of the query data.
query_ints (np.ndarray): Array with fragment intensities from the query.
db_indices (np.ndarray): Array with indices to the database data.
db_frags (np.ndarray): Array with frag types of the db data.
best_hits (np.ndarray): Reporting array which stores indices to the best hits.
score (np.ndarray): Reporting array that stores the scores of the best hits.
frag_tol (float): Fragment tolerance for search.
ppm (bool): Flag to use ppm instead of Dalton.
"""
idx_low = idxs_lower[query_idx]
idx_high = idxs_higher[query_idx]
query_idx_start = query_indices[query_idx]
query_idx_end = query_indices[query_idx + 1]
query_frag = query_frags[query_idx_start:query_idx_end]
query_int = query_ints[query_idx_start:query_idx_end]
query_int_sum = 0
for qi in query_int:
query_int_sum += qi
for db_idx in range(idx_low, idx_high):
db_idx_start = db_indices[db_idx]
db_idx_next = db_idx +1
db_idx_end = db_indices[db_idx_next]
db_frag = db_frags[db_idx_start:db_idx_end]
q_max = len(query_frag)
d_max = len(db_frag)
hits = 0
q, d = 0, 0 # q > query, d > database
while q < q_max and d < d_max:
mass1 = query_frag[q]
mass2 = db_frag[d]
delta_mass = mass1 - mass2
if ppm:
sum_mass = mass1 + mass2
mass_difference = 2 * delta_mass / sum_mass * 1e6
else:
mass_difference = delta_mass
if abs(mass_difference) <= frag_tol:
hits += 1
hits += query_int[q]/query_int_sum
d += 1
q += 1 # Only one query for each db element
elif delta_mass < 0:
q += 1
elif delta_mass > 0:
d += 1
len_ = best_hits.shape[1]
for i in range(len_):
if score[query_idx, i] < hits:
# This is meant to report the hit in our top-n array.
# The code below looks odd but is necessary for CUDA compatibility.
# It should be equivalent to this code:
#score_slice = score[query_idx, i:(len_-1)]
#hit_slice = best_hits[query_idx, i:(len_-1)]
#score[query_idx, (i+1):len_] = score_slice
#best_hits[query_idx, (i+1):len_] = hit_slice
j = 1
while len_-j >= (i+1):
k = len_-j
score[query_idx, k] = score[query_idx, k-1]
best_hits[query_idx, k] = best_hits[query_idx, k-1]
j+=1
score[query_idx, i] = hits
best_hits[query_idx, i] = db_idx
break
# Cell
import pandas as pd
import logging
from .fasta import read_database
def query_data_to_features(query_data: dict)->pd.DataFrame:
"""Helper function to extract features from query data.
This is used when the feature finder will not be used.
Args:
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: Pandas dataframe so that it can be used for subsequent processing.
"""
query_masses = query_data['prec_mass_list2']
query_mz = query_data['mono_mzs2']
query_rt = query_data['rt_list_ms2']
features = pd.DataFrame(np.array([query_masses, query_mz, query_rt]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched'])
features['feature_idx'] = features.index #Index to query_data
features['query_idx'] = np.arange(len(query_masses))
features = features.sort_values('mass_matched', ascending=True)
return features
# Cell
from typing import Callable
#this wrapper function is covered by the quick_test
def get_psms(
query_data: dict,
db_data: dict,
features: pd.DataFrame,
parallel: bool,
frag_tol: float,
prec_tol: float,
ppm: bool,
min_frag_hits: int,
callback: Callable = None,
prec_tol_calibrated:float = None,
frag_tol_calibrated:float = None,
**kwargs
)->(np.ndarray, int):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
db_data (dict): Data structure containing the database data.
features (pd.DataFrame): Pandas dataframe containing feature data.
parallel (bool): Flag to use parallel processing.
frag_tol (float): Fragment tolerance for search.
prec_tol (float): Precursor tolerance for search.
ppm (bool): Flag to use ppm instead of Dalton.
min_frag_hits (int): Minimum number of frag hits to report a PSMs.
callback (Callable, optional): Optional callback. Defaults to None.
prec_tol_calibrated (float, optional): Precursor tolerance if calibration exists. Defaults to None.
frag_tol_calibrated (float, optional): Fragment tolerance if calibration exists. Defaults to None.
Returns:
np.ndarray: Numpy recordarray storing the PSMs.
int: 0
"""
if isinstance(db_data, str):
db_masses = read_database(db_data, array_name = 'precursors')
db_frags = read_database(db_data, array_name = 'fragmasses')
db_indices = read_database(db_data, array_name = 'indices')
else:
db_masses = db_data['precursors']
db_frags = db_data['fragmasses']
db_indices = db_data['indices']
query_indices = query_data["indices_ms2"]
query_frags = query_data['mass_list_ms2']
query_ints = query_data['int_list_ms2']
if frag_tol_calibrated:
frag_tol = frag_tol_calibrated
if features is not None:
if prec_tol_calibrated:
prec_tol = prec_tol_calibrated
query_masses = features['corrected_mass'].values
else:
query_masses = features['mass_matched'].values
query_mz = features['mz_matched'].values
query_rt = features['rt_matched'].values
query_selection = features['query_idx'].values
indices = np.zeros(len(query_selection) + 1, np.int64)
indices[1:] = np.diff(query_indices)[query_selection]
indices = np.cumsum(indices)
query_frags = np.concatenate(
[
query_frags[s: e] for s, e in zip(
query_indices[query_selection], query_indices[query_selection + 1]
)
]
)
query_ints = np.concatenate(
[
query_ints[s: e] for s, e in zip(
query_indices[query_selection], query_indices[query_selection + 1]
)
]
)
query_indices = indices
else:
if prec_tol_calibrated:
prec_tol = prec_tol_calibrated
query_masses = query_data['prec_mass_list2']
query_mz = query_data['mono_mzs2']
query_rt = query_data['rt_list_ms2']
idxs_lower, idxs_higher = get_idxs(
db_masses,
query_masses,
prec_tol,
ppm
)
n_queries = len(query_masses)
n_db = len(db_masses)
top_n = 5
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
idxs_lower = cupy.array(idxs_lower)
idxs_higher = cupy.array(idxs_higher)
query_indices = cupy.array(query_indices)
query_ints = cupy.array(query_ints)
query_frags = cupy.array(query_frags)
db_indices = cupy.array(db_indices)
db_frags = cupy.array(db_frags)
db_frags = cupy.array(db_frags)
else:
import numpy
cupy = numpy
best_hits = cupy.zeros((n_queries, top_n), dtype=cupy.int_)-1
score = cupy.zeros((n_queries, top_n), dtype=cupy.float_)
logging.info(f'Performing search on {n_queries:,} query and {n_db:,} db entries with frag_tol = {frag_tol:.2f} and prec_tol = {prec_tol:.2f}.')
compare_spectrum_parallel(cupy.arange(n_queries), cupy.arange(n_queries), idxs_lower, idxs_higher, query_indices, query_frags, query_ints, db_indices, db_frags, best_hits, score, frag_tol, ppm)
query_idx, db_idx_ = cupy.where(score > min_frag_hits)
db_idx = best_hits[query_idx, db_idx_]
score_ = score[query_idx, db_idx_]
if cupy.__name__ != 'numpy':
query_idx = query_idx.get()
db_idx = db_idx.get()
score_ = score_.get()
psms = np.array(
list(zip(query_idx, db_idx, score_)), dtype=[("query_idx", int), ("db_idx", int), ("hits", float)]
)
logging.info('Found {:,} psms.'.format(len(psms)))
return psms, 0
# Cell
@njit
def frag_delta(query_frag:np.ndarray, db_frag:np.ndarray, hits:np.ndarray)-> (float, float):
"""Calculates the mass difference for a given array of hits in Dalton and ppm.
Args:
query_frag (np.ndarray): Array with query fragments.
db_frag (np.ndarray): Array with database fragments.
hits (np.ndarray): Array with reported hits.
Returns:
float: Fragment deltas in Dalton.
float: Fragment deltas in ppm.
"""
delta_m = db_frag[hits > 0] - query_frag[hits[hits > 0] - 1]
delta_m_ppm = (
2 * delta_m / (db_frag[hits > 0] + query_frag[hits[hits > 0] - 1]) * 1e6
)
return delta_m, delta_m_ppm
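# Note: compare_frags stores, for each matched database fragment, the 1-based index
# of the matching query peak (0 means no match), which is why the matched query
# masses are recovered above with query_frag[hits[hits > 0] - 1].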
# Cell
@njit
def intensity_fraction(query_int:np.ndarray, hits:np.ndarray)->float:
"""Calculate the fraction of matched intensity
Args:
query_int (np.ndarray): Array with query intensities.
hits (np.ndarray): Array with reported hits.
Returns:
float: Fraction of the matched intensity to the total intensity.
"""
total_intensity = np.sum(query_int)
if total_intensity != 0:
matched_intensity = np.sum(query_int[hits[hits > 0] - 1])
i_frac = matched_intensity / total_intensity
else:
i_frac = 0
return i_frac
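# Worked example: query_int = [10., 20., 30., 40.] with hits = [0, 2, 0, 4] matches
# the query peaks at positions 1 and 3, so the matched intensity is 20 + 40 = 60
# out of a total of 100 and the returned fraction is 0.6.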
# Cell
from numpy.lib.recfunctions import append_fields, drop_fields
def add_column(recarray:np.ndarray, column:np.ndarray, name:str)->np.ndarray:
"""Function to add a column with given name to recarray
Args:
recarray (np.ndarray): NumPy record array.
column (np.ndarray): Data column that should be added to the record array.
name (str): Name of the column in the new recordarray.
Returns:
np.ndarray: NumPy recordarray with new field.
"""
if hasattr(recarray, name):
recarray = drop_fields(recarray, name, usemask=False, asrecarray=True)
recarray = append_fields(
recarray, name, column, dtypes=column.dtype, usemask=False, asrecarray=True
)
return recarray
def remove_column(recarray:np.ndarray, name:str)->np.ndarray:
"""Function to remove a column from a recarray.
Args:
recarray (np.ndarray): NumPy record array.
name (str): Column name of the column to be removed.
Returns:
np.ndarray: NumPy record array with removed column.
"""
if hasattr(recarray, name):
recarray = drop_fields(recarray, name, usemask=False, asrecarray=True)
return recarray
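# Usage sketch (illustrative): append a retention-time column to a PSM record array
# and drop it again.
#
#   psms = np.array([(0, 3, 7.0)], dtype=[('query_idx', int), ('db_idx', int), ('hits', float)])
#   psms = add_column(psms, np.array([12.3]), 'rt')
#   psms = remove_column(psms, 'rt')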
# Cell
from numba.typed import List
@njit
def get_hits(query_frag:np.ndarray, query_int:np.ndarray, db_frag:np.ndarray, db_int:np.ndarray, frag_type:np.ndarray, mtol:float, ppm:bool, losses:list)-> np.ndarray:
"""Function to extract the types of hits based on a single PSMs.
The reporting array stores information about the matched ions column wise:
Column 0: Type of the ion.
Column 1: Ion-index refering to what ion type was matched.
Column 2: Intensity of the matched ion.
Column 3: Intensity of the database ion.
Column 4: Experimental mass of the ion.
Column 5: Theoretical mass of the ion.
Column 6: Index to the query_frag of the ion.
Column 7: Index to the database_frag of the ion.
Column 8: Left empty here; filled with the PSM index later in score().
Args:
query_frag (np.ndarray): Array with query fragments.
query_int (np.ndarray): Array with query intensities.
db_frag (np.ndarray): Array with database fragments.
db_int (np.ndarray): Array with database intensities.
frag_type (np.ndarray): Array with fragment types.
mtol (float): Mass tolerance.
ppm (bool): Flag to use ppm instead of Dalton.
losses (list): List of losses.
Returns:
np.ndarray: NumPy array that stores ion information.
"""
max_array_size = len(db_frag)*len(losses)
ions = np.zeros((max_array_size, 9))
pointer = 0
query_range = np.arange(len(query_frag))
db_range = np.arange(len(db_frag))
for idx, off in enumerate(losses):
hits = compare_frags(query_frag, db_frag-off, mtol, ppm)
n_hits = np.sum(hits>0)
hitpos = hits[hits > 0] - 1
hit = hits > 0
ions[pointer:pointer+n_hits,0] = frag_type[hits>0] #type
ions[pointer:pointer+n_hits,1] = idx #ion-index
ions[pointer:pointer+n_hits,2] = query_int[hitpos] #query int
ions[pointer:pointer+n_hits,3] = db_int[hit] #db int
ions[pointer:pointer+n_hits,4] = query_frag[hitpos] #query mass
ions[pointer:pointer+n_hits,5] = db_frag[hit]-off # db mass
ions[pointer:pointer+n_hits,6] = query_range[hitpos] # index to query entry
ions[pointer:pointer+n_hits,7] = db_range[hit] # index to db entry
pointer += n_hits
ions = ions[:pointer,:]
return ions
# Cell
from alphapept import constants
LOSS_DICT = constants.loss_dict
LOSSES = np.array(list(LOSS_DICT.values()))
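# LOSS_DICT maps a loss name to its mass offset. Judging by the hit columns filled
# in score() below (b_hits/y_hits, b-H2O_hits/y-H2O_hits, b-NH3_hits/y-NH3_hits),
# it is expected to contain the plain ion series plus the H2O and NH3 neutral losses.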
#This function is a wrapper and is tested by the quick_test
@njit
def score(
psms: np.recarray,
query_masses: np.ndarray,
query_masses_raw: np.ndarray,
query_frags: np.ndarray,
query_ints: np.ndarray,
query_indices: np.ndarray,
db_masses: np.ndarray,
db_frags: np.ndarray,
frag_types: np.ndarray,
mtol: float,
db_indices: np.ndarray,
ppm: bool,
psms_dtype: list,
db_ints: np.ndarray = None,
parallel: bool = False
) -> (np.ndarray, np.ndarray):
"""Function to extract score columns when giving a recordarray with PSMs.
Args:
psms (np.recarray): Recordarray containing PSMs.
query_masses (np.ndarray): Array with query masses.
query_masses_raw (np.ndarray): Array with raw query masses.
query_frags (np.ndarray): Array with frag types of the query data.
query_ints (np.ndarray): Array with fragment intensities from the query.
query_indices (np.ndarray): Array with indices to the query data.
db_masses (np.ndarray): Array with database masses.
db_frags (np.ndarray): Array with fragment masses.
frag_types (np.ndarray): Array with fragment types.
mtol (float): Mass tolerance.
db_indices (np.ndarray): Array with indices to the database array.
ppm (bool): Flag to use ppm instead of Dalton.
psms_dtype (list): List describing the dtype of the PSMs record array.
db_ints (np.ndarray, optional): Array with database intensities. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Defaults to False.
Returns:
np.recarray: Recordarray containing PSMs with additional columns.
np.ndarray: NumPy array containing ion information.
"""
psms_ = np.zeros(len(psms), dtype=psms_dtype)
ions_ = List()
ion_count = 0
for i in range(len(psms)):
query_idx = psms[i]["query_idx"]
db_idx = psms[i]["db_idx"]
query_idx_start = query_indices[query_idx]
query_idx_end = query_indices[query_idx + 1]
query_frag = query_frags[query_idx_start:query_idx_end]
query_int = query_ints[query_idx_start:query_idx_end]
db_frag = db_frags[db_indices[db_idx]:db_indices[db_idx+1]]
frag_type = frag_types[db_indices[db_idx]:db_indices[db_idx+1]]
if db_ints is None:
db_int = np.ones(len(db_frag))
else:
db_int = db_ints[i]
ions = get_hits(query_frag, query_int, db_frag, db_int, frag_type, mtol, ppm, LOSSES)
psms_['prec_offset'][i] = query_masses[query_idx] - db_masses[db_idx]
psms_['prec_offset_ppm'][i] = 2 * psms_['prec_offset'][i] / (query_masses[query_idx] + db_masses[db_idx] ) * 1e6
psms_['prec_offset_raw '][i] = query_masses_raw[query_idx] - db_masses[db_idx]
psms_['prec_offset_raw_ppm '][i] = 2 * psms_['prec_offset'][i] / (query_masses_raw[query_idx] + db_masses[db_idx] ) * 1e6
psms_['delta_m'][i] = np.mean(ions[:,4]-ions[:,5])
psms_['delta_m_ppm'][i] = np.mean(2 * psms_['delta_m'][i] / (ions[:,4] + ions[:,5] ) * 1e6)
psms_['total_int'][i] = np.sum(query_int)
psms_['matched_int'][i] = np.sum(ions[:,2])
psms_['matched_int_ratio'][i] = psms_['matched_int'][i] / psms_['total_int'][i]
psms_['int_ratio'][i] = np.mean(ions[:,2]/ions[:,3]) #3 is db_int, 2 is query_int
psms_['b_hits'][i] = np.sum(ions[ions[:,1]==0][:,0]>0)
psms_['y_hits'][i] = np.sum(ions[ions[:,1]==0][:,0]<0)
psms_['b-H2O_hits'][i] = np.sum(ions[ions[:,1]==1][:,0]>0)
psms_['y-H2O_hits'][i] = np.sum(ions[ions[:,1]==1][:,0]<0)
psms_['b-NH3_hits'][i] = np.sum(ions[ions[:,1]==2][:,0]>0)
psms_['y-NH3_hits'][i] = np.sum(ions[ions[:,1]==2][:,0]<0)
n_ions = len(ions)
psms_['n_ions'][i] = n_ions
psms_['ion_idx'][i] = ion_count
ion_count += n_ions
ions[:,8] = i #Save psms index
ions_.append(ions)
return psms_, ions_
# Cell
from numba.typed import Dict
def get_sequences(psms: np.recarray, db_seqs:np.ndarray)-> np.ndarray:
"""Get sequences to add them to a recarray
Args:
psms (np.recarray): Recordarray containing PSMs.
db_seqs (np.ndarray): NumPy array containing sequences.
Returns:
np.ndarray: NumPy array containing a subset of sequences.
"""
sequence_list = db_seqs[psms["db_idx"]]
return sequence_list
# Cell
from typing import Union
#This function is a wrapper and ist tested by the quick_test
def get_score_columns(
psms: np.recarray,
query_data: dict,
db_data: Union[dict, str],
features: pd.DataFrame,
parallel:bool,
frag_tol:float,
prec_tol:float,
ppm:bool,
prec_tol_calibrated:Union[None, float]=None,
frag_tol_calibrated:float = None,
**kwargs
) -> (np.ndarray, np.ndarray):
"""Wrapper function to extract score columns.
Args:
psms (np.recarray): Recordarray containing PSMs.
query_data (dict): Data structure containing the query data.
db_data: Union[dict, str]: Data structure containing the database data or path to database.
features (pd.DataFrame): Pandas dataframe containing feature data.
parallel (bool): Flag to use parallel processing.
frag_tol (float): Fragment tolerance for search.
prec_tol (float): Precursor tolerance for search.
ppm (bool): Flag to use ppm instead of Dalton.
prec_tol_calibrated (Union[None, float], optional): Calibrated offset mass. Defaults to None.
frag_tol_calibrated (float, optional): Fragment tolerance if calibration exists. Defaults to None.
Returns:
np.recarray: Recordarray containing PSMs with additional columns.
np.ndarray: NumPy array containing ion information.
"""
logging.info('Extracting columns for scoring.')
query_indices = query_data["indices_ms2"]
query_charges = query_data['charge2']
query_frags = query_data['mass_list_ms2']
query_ints = query_data['int_list_ms2']
query_scans = query_data['scan_list_ms2']
if frag_tol_calibrated:
frag_tol = frag_tol_calibrated
if 'prec_id2' in query_data.keys():
bruker = True
query_prec_id = query_data['prec_id2']
else:
bruker = False
if isinstance(db_data, str):
db_masses = read_database(db_data, array_name = 'precursors')
db_frags = read_database(db_data, array_name = 'fragmasses')
db_indices = read_database(db_data, array_name = 'indices')
frag_types = read_database(db_data, array_name = 'fragtypes')
try:
db_ints = read_database(db_data, array_name = 'db_ints')
except KeyError:
db_ints = None
else:
db_masses = db_data['precursors']
db_frags = db_data['fragmasses']
db_indices = db_data['indices']
frag_types = db_data['fragtypes']
if 'db_ints' in db_data.keys():
db_ints = db_data['db_ints']
else:
db_ints = None
if features is not None:
if prec_tol_calibrated:
query_masses = features['corrected_mass'].values
else:
query_masses = features['mass_matched'].values
query_masses_raw = features['mass_matched'].values
query_mz = features['mz_matched'].values
query_rt = features['rt_matched'].values
query_charges = features['charge_matched'].values
query_scans = query_scans[features['query_idx'].values]
if bruker:
query_prec_id = query_prec_id[features['query_idx'].values]
query_selection = features['query_idx'].values
indices = np.zeros(len(query_selection) + 1, np.int64)
indices[1:] = np.diff(query_indices)[query_selection]
indices = np.cumsum(indices)
query_frags = np.concatenate(
[
query_frags[s: e] for s, e in zip(
query_indices[query_selection], query_indices[query_selection + 1]
)
]
)
query_ints = np.concatenate(
[
query_ints[s: e] for s, e in zip(
query_indices[query_selection], query_indices[query_selection + 1]
)
]
)
query_indices = indices
else:
#TODO: This code is outdated; calling with features = None will crash.
query_masses = query_data['prec_mass_list2']
query_masses_raw = query_data['prec_mass_list2']
query_mz = query_data['mono_mzs2']
query_rt = query_data['rt_list_ms2']
float_fields = ['prec_offset', 'prec_offset_ppm', 'prec_offset_raw ','prec_offset_raw_ppm ','delta_m','delta_m_ppm','matched_int_ratio','int_ratio']
int_fields = ['total_int','matched_int','n_ions','ion_idx'] + [a+_+'_hits' for _ in LOSS_DICT for a in ['b','y']]
psms_dtype = np.dtype([(_,np.float32) for _ in float_fields] + [(_,np.int64) for _ in int_fields])
psms_, ions, = score(
psms,
query_masses,
query_masses_raw,
query_frags,
query_ints,
query_indices,
db_masses,
db_frags,
frag_types,
frag_tol,
db_indices,
ppm,
psms_dtype)
ions_ = np.vstack(ions)
for _ in psms_.dtype.names:
psms = add_column(psms, psms_[_], _)
rts = np.array(query_rt)[psms["query_idx"]]
psms = add_column(psms, rts, 'rt')
if isinstance(db_data, str):
db_seqs = read_database(db_data, array_name = 'seqs').astype(str)
else:
db_seqs = db_data['seqs']
seqs = get_sequences(psms, db_seqs)
del db_seqs
psms = add_column(psms, seqs, "sequence")
mass = np.array(query_masses)[psms["query_idx"]]
mz = np.array(query_mz)[psms["query_idx"]]
charge = np.array(query_charges)[psms["query_idx"]]
psms = add_column(psms, mass, "mass")
psms = add_column(psms, mz, "mz")
psms = add_column(psms, charge, "charge")
psms = add_column(psms, np.char.add(np.char.add(psms['sequence'],"_"), psms['charge'].astype(int).astype(str)), 'precursor')
if features is not None:
psms = add_column(psms, features.loc[psms['query_idx']]['feature_idx'].values, 'feature_idx')
psms = add_column(psms, features.loc[psms['query_idx']]['query_idx'].values, 'raw_idx')
for key in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm','dist','mobility']:
if key in features.keys():
psms = add_column(psms, features.loc[psms['query_idx']][key].values, key)
scan_no = np.array(query_scans)[psms["query_idx"]]
if bruker:
psms = add_column(psms, scan_no, "parent")
psms = add_column(psms, np.array(query_prec_id)[psms["query_idx"]], 'precursor_idx')
psms = add_column(psms, psms['feature_idx']+1, 'feature_id') #Bruker
else:
psms = add_column(psms, scan_no, "scan_no")
logging.info(f'Extracted columns from {len(psms):,} spectra.')
return psms, ions_
# Cell
import matplotlib.pyplot as plt
def plot_psms(index, ms_file):
df = ms_file.read(dataset_name='peptide_fdr')
ion_dict = {}
ion_dict[0] = ''
ion_dict[1] = '-H2O'
ion_dict[2] = '-NH3'
spectrum = df.iloc[index]
start = spectrum['ion_idx']
end = spectrum['n_ions'] + start
query_data = ms_file.read_DDA_query_data()
ions = ms_file.read(dataset_name="ions")
ion = [('b'+str(int(_))).replace('b-','y') for _ in ions.iloc[start:end]['ion_index']]
losses = [ion_dict[int(_)] for _ in ions.iloc[start:end]['ion_type']]
ion = [a+b for a,b in zip(ion, losses)]
ints = ions.iloc[start:end]['ion_int'].astype('int').values
masses = ions.iloc[start:end]['ion_mass'].astype('float').values
ion_type = ions.iloc[start:end]['ion_type'].abs().values
query_idx = spectrum['raw_idx']
query_indices = query_data["indices_ms2"]
query_charges = query_data['charge2']
query_frags = query_data['mass_list_ms2']
query_ints = query_data['int_list_ms2']
query_idx_start = query_indices[query_idx]
query_idx_end = query_indices[query_idx + 1]
query_frag = query_frags[query_idx_start:query_idx_end]
query_int = query_ints[query_idx_start:query_idx_end]
ax = plt.figure(figsize=(15, 5))
plt.vlines(query_frag, 0, query_int, "k", label="Query", alpha=0.5)
plt.vlines(masses, ints, max(query_int)*(1+0.1*ion_type), "k", label="Hits", alpha=0.5, linestyle=':')
plt.vlines(masses, 0, ints, "r", label="Hits", alpha=0.5)
for i in range(len(masses)):
plt.text(masses[i], (1+0.1*ion_type[i])*max(query_int), ion[i])
figure_title = f"{spectrum['precursor']} - b-hits {spectrum['b_hits']}, y-hits {spectrum['y_hits']}, matched int {spectrum['matched_int_ratio']*100:.2f} %"
plt.xlabel("m/z")
plt.ylabel('Intensity')
plt.ylim([0, (1+0.1*max(ion_type)+0.1)*max(query_int)])
plt.legend()
plt.title(figure_title)
plt.show()
# Cell
import os
import pandas as pd
import copy
import alphapept.io
import alphapept.fasta
from typing import Callable
#This function is a wrapper and is tested by the quick_test
def store_hdf(df: pd.DataFrame, path: str, key:str, replace:bool=False, swmr:bool = False):
"""Wrapper function to store a DataFrame in an hdf.
Args:
df (pd.DataFrame): DataFrame to be stored.
        path (alphapept.io.MS_Data_File): ms_data file object whose underlying hdf file is the target.
key (str): Name of the field to be saved.
        replace (bool, optional): Flag whether the field should be replaced. Defaults to False.
swmr (bool, optional): Flag to use swmr(single write multiple read)-mode. Defaults to False.
"""
ms_file = alphapept.io.MS_Data_File(path.file_name, is_overwritable=True)
if replace:
ms_file.write(df, dataset_name=key, swmr = swmr)
else:
try:
df.to_hdf(path, key=key, append=True)
#TODO, append is not implemented yet
except (ValueError, AttributeError):
try:
old_df = ms_file.read(dataset_name=key, swmr = swmr)
new_df = pd.concat([old_df, df])
ms_file.write(new_df, dataset_name=key, swmr = swmr)
except KeyError: # File is created new
ms_file.write(df, dataset_name=key, swmr = swmr)
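# Illustrative sketch (not part of the original module): one way store_hdf might be
# called, assuming "example.ms_data.hdf" already exists as an alphapept ms_data file.
# The path, field name and columns below are hypothetical placeholders.
def _example_store_hdf_usage():
    ms_file = alphapept.io.MS_Data_File("example.ms_data.hdf", is_overwritable=True)  # hypothetical file
    first_batch = pd.DataFrame({"score": [0.9, 0.7], "hits": [12, 8]})
    second_batch = pd.DataFrame({"score": [0.5], "hits": [4]})
    store_hdf(first_batch, ms_file, "demo_results", replace=True)    # overwrite the field
    store_hdf(second_batch, ms_file, "demo_results", replace=False)  # concatenate to the stored field
    return ms_file.read(dataset_name="demo_results")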
#This function is a wrapper and is tested by the quick_test
def search_db(to_process:tuple, callback:Callable = None, parallel:bool=False, first_search:bool = True) -> Union[bool, str]:
"""Wrapper function to perform database search to be used by a parallel pool.
Args:
to_process (tuple): Tuple containing an index to the file and the experiment settings.
callback (Callable, optional): Callback function to indicate progress. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Defaults to False.
first_search (bool, optional): Flag to indicate this is the first search. Defaults to True.
Returns:
        Union[bool, str]: Returns True if the search was successful, otherwise returns a string containing the Exception.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base_file_name, ext = os.path.splitext(file_name)
ms_file = base_file_name+".ms_data.hdf"
skip = False
feature_calibration = False
ms_file_ = alphapept.io.MS_Data_File(
f"{ms_file}"
)
if not first_search:
try:
calibration = float(ms_file_.read(group_name = 'features', dataset_name='corrected_mass', attr_name='estimated_max_precursor_ppm'))
if calibration == 0:
logging.info('Calibration is 0, skipping second database search.')
skip = True
else:
settings['search']['prec_tol_calibrated'] = calibration*settings['search']['calibration_std_prec']
calib = settings['search']['prec_tol_calibrated']
logging.info(f"Found calibrated prec_tol with value {calib:.2f}")
except KeyError as e:
logging.info(f'{e}')
try:
fragment_std = float(ms_file_.read(dataset_name="estimated_max_fragment_ppm")[0])
skip = False
settings['search']['frag_tol_calibrated'] = fragment_std*settings['search']['calibration_std_frag']
calib = settings['search']['frag_tol_calibrated']
logging.info(f"Found calibrated frag_tol with value {calib:.2f}")
except KeyError as e:
logging.info(f'{e}')
if not skip:
db_data_path = settings['experiment']['database_path']
# TODO calibrated_fragments should be included in settings
query_data = ms_file_.read_DDA_query_data(
calibrated_fragments=True,
database_file_name=settings['experiment']['database_path']
)
features = ms_file_.read(dataset_name="features")
psms, num_specs_compared = get_psms(query_data, db_data_path, features, **settings["search"])
if len(psms) > 0:
psms, ions = get_score_columns(psms, query_data, db_data_path, features, **settings["search"])
if first_search:
logging.info('Saving first_search results to {}'.format(ms_file))
save_field = 'first_search'
else:
logging.info('Saving second_search results to {}'.format(ms_file))
save_field = 'second_search'
store_hdf(pd.DataFrame(psms), ms_file_, save_field, replace=True)
ion_columns = ['ion_index','ion_type','ion_int','db_int','ion_mass','db_mass','query_idx','db_idx','psms_idx']
store_hdf(pd.DataFrame(ions, columns = ion_columns), ms_file_, 'ions', replace=True)
else:
logging.info('No psms found.')
logging.info(f'Search of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Search of file {file_name} failed. Exception {e}.')
return f"{e}" #Can't return exception object, cast as string
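# Illustrative sketch (not part of the original module): search_db is meant to be
# mapped over the experiment's files by a process pool. `settings` below stands in
# for a full, hypothetical experiment settings dictionary.
def _example_search_db_parallel(settings):
    import multiprocessing
    to_process = [(index, settings) for index in range(len(settings['experiment']['file_paths']))]
    with multiprocessing.Pool(processes=2) as pool:
        results = pool.map(search_db, to_process)
    # each entry is True on success, or the exception message as a string
    return results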
# Cell
from .fasta import blocks, generate_peptides, add_to_pept_dict
from .io import list_to_numpy_f32
from .fasta import block_idx, generate_fasta_list, generate_spectra, check_peptide
from alphapept import constants
mass_dict = constants.mass_dict
import os
import alphapept.performance
#This function is a wrapper and is tested by the quick_test
def search_fasta_block(to_process:tuple) -> (list, int):
"""Search fasta block. This file digests per block and does not use a saved database.
For searches with big fasta files or unspecific searches.
Args:
to_process (tuple): Tuple containing a fasta_index, fasta_block, a list of files and a list of experimental settings.
Returns:
        list: A list of DataFrames containing the PSMs found for each searched file.
int: Number of new peptides that were generated in this iteration.
"""
fasta_index, fasta_block, ms_files, settings = to_process
settings_ = settings[0]
spectra_block = settings_['fasta']['spectra_block']
to_add = List()
psms_container = [list() for _ in ms_files]
f_index = 0
pept_dict = {}
for element in fasta_block:
sequence = element["sequence"]
mod_peptides = generate_peptides(sequence, **settings_['fasta'])
pept_dict, added_peptides = add_to_pept_dict(pept_dict, mod_peptides, fasta_index+f_index)
if len(added_peptides) > 0:
to_add.extend(added_peptides)
f_index += 1
if len(to_add) > 0:
for seq_block in blocks(to_add, spectra_block):
spectra = generate_spectra(seq_block, mass_dict)
precmasses, seqs, fragmasses, fragtypes = zip(*spectra)
sortindex = np.argsort(precmasses)
fragmasses = np.array(fragmasses, dtype=object)[sortindex]
fragtypes = np.array(fragtypes, dtype=object)[sortindex]
lens = [len(_) for _ in fragmasses]
n_frags = sum(lens)
frags = np.zeros(n_frags, dtype=fragmasses[0].dtype)
frag_types = np.zeros(n_frags, dtype=fragtypes[0].dtype)
indices = np.zeros(len(lens) + 1, np.int64)
indices[1:] = lens
indices = np.cumsum(indices)
#Fill data
for _ in range(len(indices)-1):
start = indices[_]
end = indices[_+1]
frags[start:end] = fragmasses[_]
frag_types[start:end] = fragtypes[_]
db_data = {}
db_data["precursors"] = np.array(precmasses)[sortindex]
db_data["seqs"] = np.array(seqs)[sortindex]
db_data["fragmasses"] = frags
db_data["fragtypes"] = frag_types
db_data["indices"] = indices
for file_idx, ms_file in enumerate(ms_files):
query_data = alphapept.io.MS_Data_File(
f"{ms_file}"
).read_DDA_query_data(swmr=True)
try:
features = alphapept.io.MS_Data_File(
ms_file
).read(dataset_name="features",swmr=True)
except FileNotFoundError:
features = None
except KeyError:
features = None
psms, num_specs_compared = get_psms(query_data, db_data, features, **settings[file_idx]["search"])
if len(psms) > 0:
                    #This could be sped up.
psms, ions = get_score_columns(psms, query_data, db_data, features, **settings[file_idx]["search"])
fasta_indices = [set(x for x in pept_dict[_]) for _ in psms['sequence']]
psms_df = pd.DataFrame(psms)
psms_df['fasta_index'] = fasta_indices
psms_container[file_idx].append(psms_df)
return psms_container, len(to_add)
# Cell
def filter_top_n(temp:pd.DataFrame, top_n:int = 10)-> pd.DataFrame:
"""Takes a dataframe and keeps only the top n entries (based on hits).
Combines fasta indices for sequences.
Args:
temp (pd.DataFrame): Pandas DataFrame containing PSMs.
top_n (int, optional): Number of top-n entries to be kept. Defaults to 10.
Returns:
pd.DataFrame: Filtered DataFrame.
"""
pept_dict_ = {}
temp['temp_idx'] = np.arange(len(temp))
for k, v in temp[['sequence','fasta_index']].values:
if k in pept_dict_:
new_set = pept_dict_[k]
if isinstance(v, set):
new_set.update(v)
else:
new_set.add(v)
pept_dict_[k] = new_set
else:
pept_dict_[k] = set(v)
temp['fasta_index'] = [pept_dict_[_] for _ in temp['sequence']]
temp = temp.drop_duplicates(subset = ['raw_idx','sequence','hits','feature_idx'])
temp = temp.sort_values('hits', ascending = False).groupby('raw_idx').head(top_n)
return temp
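# Illustrative sketch (not part of the original module): filter_top_n on a toy PSM table.
# FASTA indices of duplicate sequences are merged into one set and only the
# highest-scoring entry per raw_idx is kept when top_n=1.
def _example_filter_top_n():
    toy = pd.DataFrame({
        'raw_idx':     [0, 0, 0, 1],
        'sequence':    ['PEPTIDEK', 'PEPTIDEK', 'ELVISK', 'ELVISK'],
        'fasta_index': [{1}, {2}, {3}, {3}],
        'hits':        [7, 5, 3, 6],
        'feature_idx': [10, 11, 12, 13],
    })
    return filter_top_n(toy, top_n=1)  # two rows remain, fasta_index of 'PEPTIDEK' becomes {1, 2}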
# Cell
import psutil
import alphapept.constants as constants
from .fasta import get_fragmass, parse
def ion_extractor(df: pd.DataFrame, ms_file, frag_tol:float, ppm:bool)->(np.ndarray, np.ndarray):
"""Extracts the matched hits (ions) from a dataframe.
Args:
df (pd.DataFrame): Pandas dataframe containing the results of the first search.
        ms_file (alphapept.io.MS_Data_File): ms_data file to read the DDA query data from.
frag_tol (float): Fragment tolerance for search.
ppm (bool): Flag to use ppm instead of Dalton.
Returns:
np.ndarray: Numpy recordarray storing the PSMs.
np.ndarray: Numpy recordarray storing the ions.
"""
query_data = ms_file.read_DDA_query_data()
query_indices = query_data["indices_ms2"]
query_frags = query_data['mass_list_ms2']
query_ints = query_data['int_list_ms2']
psms = df.to_records()
ion_count = 0
ions_ = List()
for i in range(len(psms)):
query_idx = psms[i]["raw_idx"]
db_idx = psms[i]["db_idx"]
query_idx_start = query_indices[query_idx]
query_idx_end = query_indices[query_idx + 1]
query_frag = query_frags[query_idx_start:query_idx_end]
query_int = query_ints[query_idx_start:query_idx_end]
seq = psms[i]['sequence']
db_frag, frag_type = get_fragmass(parse(seq), constants.mass_dict)
db_int = np.ones_like(db_frag)
ions = get_hits(query_frag, query_int, db_frag, db_int, frag_type, frag_tol, ppm, LOSSES)
n_ions = len(ions)
psms['n_ions'][i] = n_ions
psms['ion_idx'][i] = ion_count
ion_count += n_ions
ions_.append(ions)
ions_ = np.vstack(ions_)
return psms, ions_
#This function is a wrapper and is tested by the quick_test
def search_parallel(settings: dict, calibration:Union[list, None] = None, fragment_calibration:Union[list, None] = None, callback: Union[Callable, None] = None) -> dict:
"""Function to search multiple ms_data files in parallel.
This function will additionally calculate fragments and precursor masses from a given FASTA file.
Args:
        settings (dict): Settings file containing the experimental definitions.
calibration (Union[list, None], optional): List of calibrated offsets. Defaults to None.
fragment_calibration (Union[list, None], optional): List of calibrated fragment offsets. Defaults to None.
callback (Union[Callable, None], optional): Callback function. Defaults to None.
Returns:
dict: FASTA dictionary.
"""
fasta_list, fasta_dict = generate_fasta_list(fasta_paths = settings['experiment']['fasta_paths'], **settings['fasta'])
fasta_block = settings['fasta']['fasta_block']
ms_file_path = []
for _ in settings['experiment']['file_paths']:
base, ext = os.path.splitext(_)
ms_file_path.append(base + '.ms_data.hdf')
if calibration:
custom_settings = []
for _ in calibration:
settings_ = copy.deepcopy(settings)
settings_["search"]["prec_tol_calibrated"] = _
custom_settings.append(settings_)
else:
custom_settings = [settings for _ in ms_file_path]
if fragment_calibration:
for idx, _ in enumerate(fragment_calibration):
custom_settings[idx]["search"]["frag_tol_calibrated"] = _
logging.info(f"Number of FASTA entries: {len(fasta_list):,} - FASTA settings {settings['fasta']}")
to_process = [(idx_start, fasta_list[idx_start:idx_end], ms_file_path, custom_settings) for idx_start, idx_end in block_idx(len(fasta_list), fasta_block)]
memory_available = psutil.virtual_memory().available/1024**3
n_processes = int(memory_available // 4 )
logging.info(f'Setting Process limit to {n_processes}')
n_processes = alphapept.performance.set_worker_count(
worker_count=n_processes,
set_global=False
)
n_seqs_ = 0
df_cache = {}
ion_cache = {}
with alphapept.performance.AlphaPool(n_processes) as p:
max_ = len(to_process)
for i, (psm_container, n_seqs) in enumerate(p.imap_unordered(search_fasta_block, to_process)):
n_seqs_ += n_seqs
logging.info(f'Block {i+1} of {max_} complete - {((i+1)/max_*100):.2f} % - created peptides {n_seqs:,} ')
for j in range(len(psm_container)): #Temporary hdf files for avoiding saving issues
output = [_ for _ in psm_container[j]]
if len(output) > 0:
psms = pd.concat(output)
if ms_file_path[j] in df_cache:
temp = filter_top_n(pd.concat([df_cache[ms_file_path[j]], psms]))
selector = temp['temp_idx'].values
df_cache[ms_file_path[j]] = temp
else:
df_cache[ms_file_path[j]] = psms
if callback:
callback((i+1)/max_)
for idx, _ in enumerate(ms_file_path):
if _ in df_cache:
x = df_cache[_]
ms_file = alphapept.io.MS_Data_File(_)
x['fasta_index'] = x['fasta_index'].apply(lambda x: ','.join(str(_) for _ in x))
if 'frag_tol_calibrated' in custom_settings[idx]['search']:
frag_tol = custom_settings[idx]['search']['frag_tol_calibrated']
else:
frag_tol = custom_settings[idx]['search']['frag_tol']
ppm = custom_settings[idx]['search']['ppm']
if calibration:
save_field = 'first_search'
else:
save_field = 'second_search'
psms, ions = ion_extractor(x, ms_file, frag_tol, ppm)
            store_hdf(pd.DataFrame(psms), ms_file, save_field, replace=True)
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
import matplotlib.dates as mdates
import warnings
import itertools
import dateutil
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV as gsc
from sklearn.linear_model import Ridge,Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
def main ():
# Using svm
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
S1,S2=AQI_SVM(data)
S3,S4=AQI_Feature_importance_SVM(data)
S5,S6=AQI_Domain_Knowledge_SVM(data)
S7,S8=AQI_without_Domain_Knowledge_SVM(data)
##Linear Regression
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
LR1,LR2=AQI(data)
LR3,LR4=AQI_Feature_importance(data)
    LR5,LR6=AQI_Domain_Knowledge(data)
LR7,LR8=AQI_without_Domain_Knowledge(data)
## Predincting for next day
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
normalize(data)
y=pd.read_csv('AQI_prediction_add.csv')
LR_F1,LR_F2=AQI_Future(data,y.AQI_predicted)
LR_F3,LR_F4=AQI_Feature_importance_Future(data,y.AQI_predicted)
LR_F5,LR_F6=AQI_Domain_Knowledge_Future(data,y.AQI_predicted)
LR_F7,LR_F8=AQI_without_Domain_Knowledge_Future(data,y.AQI_predicted)
##Predicting for Autumn Season
    data = pd.read_csv('autumn_data.csv')
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.pipelines.components import LogTransformer
def test_log_transformer_init():
log_ = LogTransformer()
assert log_.parameters == {}
def test_log_transformer_no_y(X_y_regression):
X, y = X_y_regression
y = None
output_X, output_y = LogTransformer().fit_transform(X, y)
np.testing.assert_equal(X, output_X)
assert not output_y
@pytest.mark.parametrize("input_type", ["np", "pd", "ww"])
@pytest.mark.parametrize("data_type", ["positive", "mixed", "negative"])
def test_log_transformer_fit_transform(data_type, input_type, X_y_regression):
X_, y_ = X_y_regression
if data_type == "positive":
y_ = np.abs(y_)
elif data_type == "negative":
y_ = -np.abs(y_)
X = pd.DataFrame(X_)
    y = pd.Series(y_)
import os
from glob import glob
from tqdm import tqdm as print_progress
from datetime import datetime, timedelta, date
import dateutil
import math
import numpy as np
import pandas as pd
import featuretools as ft
from featuretools.variable_types import Id, Numeric, Categorical, Datetime
import ai.src.utils as utils
from ai.src.utils.utils import *
from ai.src.utils.customized_variable_types import *
START_DATE = pd.to_datetime('2000-01-01', format='%Y-%m-%d')
END_DATE = pd.to_datetime(datetime.now().strftime("%Y-%m-%d"), format='%Y-%m-%d')
def data_impute(df: pd.DataFrame) -> pd.DataFrame:
df = utils.feature_engineering.filter_out_minor_categoricals(df=df,
column_names=['Channel', 'Country'])
# Drop missing-valued samples
df.dropna(subset=['TotalPayment', 'ArrivalDate', 'DepartureDate'], inplace=True)
# Normalize
df.Status = df.Status.str.upper()
return df
def generate_features_recency(guest_data: pd.DataFrame, END_DATE):
most_recent_date = guest_data.DepartureDate.max()
inactive_days = int((END_DATE-most_recent_date) / pd.Timedelta(days=1))
active_months = list(guest_data.ArrivalDate.dt.month) + \
list(guest_data.DepartureDate.dt.month)
# list(guest_data.CreatedDate.dt.month) + \
most_active_month = max(set(active_months), key=active_months.count)
most_active_quarter = month_to_quarter(most_active_month)
least_active_month = min(set(active_months), key=active_months.count)
least_active_quarter = month_to_quarter(least_active_month)
return inactive_days, most_active_month, least_active_month, most_active_quarter, least_active_quarter
def generate_features_frequency(guest_data: pd.DataFrame, customer_lifetime,
date_1st_booking, END_DATE, MAM_data):
n_orders = len(guest_data.groupby(by=['ArrivalDate']))
average_orders = int(n_orders/customer_lifetime) if customer_lifetime>1 else n_orders
n_orders_in_MAM = len(MAM_data)
average_orders_in_MAM = int(n_orders_in_MAM/customer_lifetime) if customer_lifetime>1 else n_orders_in_MAM
return n_orders, average_orders, average_orders_in_MAM
def generate_features_monetary(guest_data: pd.DataFrame, customer_lifetime, MAM_data):
total_revenue = guest_data.TotalPayment.sum()
average_revenue = total_revenue/customer_lifetime if customer_lifetime>1 else total_revenue
revenue_in_MAM = MAM_data.TotalPayment.sum()
average_revenue_in_MAM = revenue_in_MAM/int(customer_lifetime) if customer_lifetime>1 else revenue_in_MAM
return total_revenue, average_revenue, average_revenue_in_MAM
def generate_features_nonRFM(guest_data: pd.DataFrame, n_orders, total_revenue):
nights_in_house = int(np.mean(guest_data.Nights))
arrdates = guest_data.ArrivalDate.sort_values()
days_between_orders = (arrdates.values[1:] - arrdates.values[:-1]) / pd.Timedelta(days=1)
days_between_orders = np.mean(days_between_orders) if len(days_between_orders)>0 \
else (START_DATE-END_DATE) / pd.Timedelta(days=1)
days_between_orders = int(days_between_orders)
n_rooms = len(guest_data)
average_rooms = int(n_rooms/n_orders)
average_revenue_per_order = total_revenue / n_orders
average_revenue_per_room = total_revenue / n_rooms
return days_between_orders, nights_in_house, n_rooms, \
average_rooms, average_revenue_per_order, average_revenue_per_room
def feature_engineering_manual_for_LTV(df: pd.DataFrame) -> pd.DataFrame:
global START_DATE, END_DATE
features = [
# Recency features
'InactiveDays', 'MostActiveMonth', 'LeastActiveMonth', 'MostActiveQuarter', 'LeastActiveQuarter',
# Frequency features
'NumberOfOrders', 'AverageOrdersPerYear', 'AverageOrdersInMostActiveMonth',
# Monetary features
'TotalRevenue', 'AverageRevenuePerYear', 'AverageRevenueInMostActiveMonth',
# Non-RFM features
'AverageDaysBetweenOrders', 'AverageNightsInHouse', 'NumberOfRooms',
'AverageRoomsPerOrder', 'AverageRevenuePerOrder', 'AverageRevenuePerRoom',
# Personal features
'Lifetime', 'Country',
]
guests_df = pd.DataFrame(columns=features)
for guest_id, guest_data in print_progress(df.groupby(by=['GuestID'])):
# Filter by time window
# date_1st_booking = guest_data.CreatedDate.min()
date_1st_booking = guest_data.ArrivalDate.min()
customer_lifetime = (END_DATE-date_1st_booking).days / 365.25
# Generate features for RECENCY
inactive_days, most_active_month, least_active_month, \
most_active_quarter, least_active_quarter = generate_features_recency(guest_data, END_DATE)
# Filter Most Active Month
MAM_data = guest_data.loc[
# (guest_data.CreatedDate.dt.month == most_active_month) |
(guest_data.ArrivalDate.dt.month == most_active_month) |
(guest_data.DepartureDate.dt.month == most_active_month)
]
# Generate features for FREQUENCY
n_orders, average_orders, average_orders_in_MAM = generate_features_frequency(
guest_data, customer_lifetime, date_1st_booking, END_DATE, MAM_data
)
# Generate features for MONETARY
total_revenue, average_revenue, average_revenue_in_MAM = generate_features_monetary(
guest_data, customer_lifetime, MAM_data
)
# Generate features for LTV classification
days_between_orders, nights_in_house, n_rooms, \
average_rooms, average_revenue_per_order, average_revenue_per_room = generate_features_nonRFM(
guest_data, n_orders, total_revenue
)
# Feed generated features into DataFrame
guests_df.loc[guest_id] = [
inactive_days, most_active_month, least_active_month, most_active_quarter, least_active_quarter,
n_orders, average_orders, average_orders_in_MAM,
total_revenue, average_revenue, average_revenue_in_MAM,
days_between_orders, nights_in_house, n_rooms, average_rooms, average_revenue_per_order, average_revenue_per_room,
customer_lifetime, guest_data.Country.unique()[0]
]
guests_df['GuestID'] = list(guests_df.index)
guests_df.reset_index(drop=True, inplace=True)
for col in guests_df.columns:
if col == 'Country':
continue
guests_df[col] = guests_df[col].astype(float)
return guests_df
def feature_engineering_auto_for_LTV(df: pd.DataFrame) -> pd.DataFrame:
df_name = 'returning_guests'
EntitySet = ft.EntitySet(id=df_name)
EntitySet = EntitySet.entity_from_dataframe(
entity_id=df_name,
dataframe=df,
make_index=False,
index='GuestID',
variable_types={
'GuestID': Id,
'Country': Categorical,
'Lifetime': Age,
'TotalRevenue': Price,
'AverageRevenuePerYear': Price,
'AverageRevenueInMostActiveMonth': Price,
'InactiveDays': Numeric,
'NumberOfRooms': Numeric,
'NumberOfOrders': Numeric,
'AverageNightsInHouse': Numeric,
'AverageDaysBetweenOrders': Numeric,
'AverageRevenuePerOrder': Numeric,
'AverageRevenuePerRoom': Numeric,
'AverageRoomsPerOrder': Numeric,
'AverageOrdersPerYear': Numeric,
'AverageOrdersInMostActiveMonth': Numeric,
'MostActiveMonth': Categorical,
'LeastActiveMonth': Categorical,
'MostActiveQuarter': Categorical,
'LeastActiveQuarter': Categorical,
}
)
feature_matrix, feature_definitions = utils.feature_engineering.auto_feature_engineering(
entity_set=EntitySet, table_name=df_name, verbose=True
)
return feature_matrix
def feature_engineering_auto_for_1stBooking(df: pd.DataFrame) -> pd.DataFrame:
df_name = '1st_reservation'
EntitySet = ft.EntitySet(id=df_name)
EntitySet = EntitySet.entity_from_dataframe(
entity_id=df_name,
dataframe=df,
make_index=False,
index='GuestID',
variable_types={
'GuestID': Id,
'Adults': People,
'Children': People,
'TotalPayment': Price,
'Nights': Numeric,
'RoomPrice': Numeric,
'NumberOfRooms': Numeric,
'Channel': Categorical,
'Status': Categorical,
'RoomGroupID': Categorical,
# 'CreatedDate': Datetime,
'ArrivalDate': Datetime,
'DepartureDate': Datetime,
}
)
feature_matrix, feature_definitions = utils.feature_engineering.auto_feature_engineering(
entity_set=EntitySet, table_name=df_name, verbose=True
)
return feature_matrix
def feature_engineering_pipeline(folder_path: str, current_year: int = 0):
global END_DATE
if current_year != 0:
input_date = str(int(current_year)+1) + "-01-01"
        END_DATE = pd.to_datetime(input_date, format='%Y-%m-%d')
import calendar
import pandas as pd
from colourutils import extend_colour_map
def extend_data_range(data):
"""
Extends the index of the given Series so that it has daily values, starting from the 1st of the earliest month and
ending on the last day of the latest month.
:param data: The Series to be extended with a datetime index
:return: The Series with an extended daily index
"""
earliest_date = data.index.min()
    first_month_start = pd.Timestamp(year=earliest_date.year, month=earliest_date.month, day=1)
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import requests
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
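# Illustrative sketch (not part of the original server code): Convert pairs up the
# alternating key/value elements of a flat list into a dictionary.
def _example_convert():
    flat_params = ['C', 1.0, 'kernel', 'rbf']  # hypothetical parameter list
    assert Convert(flat_params) == {'C': 1.0, 'kernel': 'rbf'}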
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
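# Illustrative sketch (not part of the original server code): column_index maps column
# names to their positional indices in a DataFrame, regardless of query order.
def _example_column_index():
    df = pd.DataFrame(columns=['age', 'chol', 'trestbps'])  # hypothetical columns
    assert column_index(df, ['chol', 'age']) == [1, 0]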
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)  # keys index the M features
)
return out
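# Illustrative sketch (not part of the original server code): per-class feature
# importance on a toy two-feature, two-class data set. The returned mapping is
# {class_label: {feature_index: scaled class mean * global feature importance}}.
def _example_class_feature_importance():
    X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 35.0], [4.0, 45.0]])
    Y = np.array([0, 0, 1, 1])
    importances = np.array([0.7, 0.3])  # hypothetical global importances
    return class_feature_importance(X, Y, importances)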
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
    PerClassMetricPandas = pd.DataFrame(PerClassMetric)
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
frac_dups = n_unique / n_total
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
# new values from from perf/batch_vs_single script.
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
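# Illustrative sketch (not part of lifelines): the regression-based heuristic picks the
# batch algorithm when durations contain many ties (few unique values) and the
# single-pass algorithm when most durations are unique.
def _example_batch_vs_single():
    assert BatchVsSingle.decide(None, n_unique=50, n_total=5000, n_vars=5) == "batch"
    assert BatchVsSingle.decide(None, n_unique=9000, n_total=10000, n_vars=5) == "single"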
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
Attributes
----------
params_ : Series
The estimated coefficients. Changed in version 0.22.0: use to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
            the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
            specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.params_ = pd.Series(params_, index=X.columns, name="coef") / self._norm_std
self.hazard_ratios_ = pd.Series(np.exp(self.params_), index=X.columns, name="exp(coef)")
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
def _preprocess_dataframe(self, df):
# this should be a pure function
df = df.copy()
if self.strata is not None:
df = df.sort_values(by=_to_list(self.strata) + [self.duration_col])
original_index = df.index.copy()
df = df.set_index(self.strata)
else:
df = df.sort_values(by=self.duration_col)
original_index = df.index.copy()
# Extract time and event
T = df.pop(self.duration_col)
E = (
df.pop(self.event_col)
if (self.event_col is not None)
else pd.Series(np.ones(self._n_examples), index=df.index, name="E")
)
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((self._n_examples,)), index=df.index, name="weights")
)
_clusters = df.pop(self.cluster_col).values if self.cluster_col else None
X = df.astype(float)
T = T.astype(float)
# we check nans here because converting to bools maps NaNs to True..
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(X, T, E, W)
return X, T, E, W, original_index, _clusters
def _check_values(self, X, T, E, W):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
check_low_var(X)
check_complete_separation(X, E, T, self.event_col)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any() and not self.robust:
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
def _fit_model(
self,
X,
T,
E,
weights=None,
initial_point=None,
step_size=None,
precision=1e-07,
show_progress=True,
max_steps=50,
): # pylint: disable=too-many-statements,too-many-branches
"""
        Newton-Raphson algorithm for fitting the CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: boolean, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
            the maximum number of iterations of the Newton-Raphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
self.path = []
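        # A sketch of the update implemented by the loop below: at each iteration it
        # solves (-H) @ delta = g for delta (i.e. delta = (-H)^{-1} g, with g the score
        # vector and H the Hessian returned by get_gradients), takes the damped step
        # beta <- beta + step_size * delta, and monitors norm(delta) and the Newton
        # decrement g' (-H)^{-1} g / 2 as stopping criteria.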
assert precision <= 1.0, "precision must be less than or equal to 1."
_, d = X.shape
# make sure betas are correct size.
if initial_point is not None:
assert initial_point.shape == (d,)
beta = initial_point
else:
beta = np.zeros((d,))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == "Efron":
decision = BatchVsSingle.decide(self._batch_mode, T.nunique(), X.shape[0], X.shape[1])
get_gradients = getattr(self, "_get_efron_values_%s" % decision)
self._batch_mode = decision == "batch"
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, T.values, E.values, weights.values, beta)
else:
g = np.zeros_like(beta)
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for _h, _g, _ll in self._partition_by_strata_and_apply(X, T, E, weights, get_gradients, beta):
g += _g
h += _h
ll += _ll
if i == 1 and np.all(beta == 0):
# this is a neat optimization, the null partial likelihood
# is the same as the full partial but evaluated at zero.
# if the user supplied a non-trivial initial point, we need to delay this.
self._ll_null_ = ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta
h.flat[:: d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g, assume_a="pos", check_finite=False)
except ValueError as e:
if "infs or NaNs" in str(e):
raise ConvergenceError(
"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
else:
# something else?
raise e
except LinAlgError as e:
raise ConvergenceError(
"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
delta = inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError(
"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
)
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
newton_decrement = g.dot(inv_h_dot_g_T) / 2
if show_progress:
print(
"\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
% (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
end="",
)
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
                # 50 iteration steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn(
"The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
ConvergenceWarning,
)
converging, completed = False, False
beta += step_size * delta
previous_ll = ll
step_size = step_sizer.update(norm_delta).next()
self._hessian_ = hessian
self._score_ = gradient
self.log_likelihood_ = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
elif show_progress and not completed:
print("Convergence failed. See any warning messages.")
# report to the user problems that we detect.
if completed and norm_delta > 0.1:
            warnings.warn(
                "Newton-Raphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
% norm_delta,
ConvergenceWarning,
)
elif not completed:
            warnings.warn(
                "Newton-Raphson failed to converge sufficiently in %d steps.\n" % max_steps, ConvergenceWarning
)
return beta
def _get_efron_values_single(self, X, T, E, weights, beta):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
        A good explanation of Efron's method: consider three of five subjects who fail at the same time.
        As it is not known a priori which of them fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure; similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
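        # Per distinct event time with d tied deaths, writing phi_k = w_k * exp(x_k . beta),
        # phi_R for the risk-set sum and phi_D for the tied-death sum, the loop below adds
        #   (sum_{deaths} w_i x_i) . beta - (sum_{deaths} w_i / d) * sum_{l=0}^{d-1} log(phi_R - (l/d) * phi_D)
        # to the log partial likelihood, with the matching first and second derivative terms
        # accumulated into the gradient and Hessian.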
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = np.zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * np.exp(np.dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = np.multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
            # There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = np.zeros((d,))
tie_phi = 0
tie_phi_x = np.zeros((d,))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
@staticmethod
def _trivial_log_likelihood_batch(T, E, weights):
# used for log-likelihood test
n = T.shape[0]
log_lik = 0
_, counts = np.unique(-T, return_counts=True)
risk_phi = 0
pos = n
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
weights_at_t = weights[slice_]
phi_i = weights_at_t
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
# Calculate the sums of Tie set
deaths = E[slice_]
tied_death_counts = deaths.astype(int).sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
weights_deaths = weights_at_t[deaths]
weight_count = weights_deaths.sum()
if tied_death_counts > 1:
tie_phi = phi_i[deaths].sum()
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
pos -= count_of_removals
return log_lik
@staticmethod
def _trivial_log_likelihood_single(T, E, weights):
# assumes sorted on T!
log_lik = 0
n = T.shape[0]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
# Calculate phi values
phi_i = weights[i]
w = weights[i]
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
# Calculate sums of Ties, if this is an event
if ei:
tie_phi = tie_phi + phi_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
if tied_death_counts > 1:
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
# reset tie values
tied_death_counts = 0
weight_count = 0.0
tie_phi = 0
return log_lik
def _get_efron_values_batch(self, X, T, E, weights, beta): # pylint: disable=too-many-locals
"""
        Assumes sorted ascending on T.
        Calculates the first and second order vector differentials, with respect to beta.
        A good explanation of how Efron handles ties: consider three of five subjects who fail at the same time.
        As it is not known a priori which of them fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure; similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
Returns
-------
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# weights = weights[:, None]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# counts are sorted by -T
_, counts = np.unique(-T, return_counts=True)
scores = weights * np.exp(np.dot(X, beta))
pos = n
ZERO_TO_N = np.arange(counts.max())
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
X_at_t = X[slice_]
weights_at_t = weights[slice_]
deaths = E[slice_]
phi_i = scores[slice_, None]
phi_x_i = phi_i * X_at_t
phi_x_x_i = np.dot(X_at_t.T, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
risk_phi_x = risk_phi_x + (phi_x_i).sum(0)
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate the sums of Tie set
tied_death_counts = deaths.sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
"""
I think there is another optimization that can be made if we sort on
T and E. Using some accounting, we can skip all the [death] indexing below.
"""
xi_deaths = X_at_t[deaths]
weights_deaths = weights_at_t[deaths]
x_death_sum = np.einsum("a,ab->b", weights_deaths, xi_deaths)
weight_count = weights_deaths.sum()
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
# a lot of this is now in Einstein notation for performance, but see original "expanded" code here
# https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5dff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
# it's faster if we can skip computing these when we don't need to.
phi_x_i_deaths = phi_x_i[deaths]
tie_phi = phi_i[deaths].sum()
tie_phi_x = (phi_x_i_deaths).sum(0)
tie_phi_x_x = np.dot(xi_deaths.T, phi_x_i_deaths)
increasing_proportion = ZERO_TO_N[:tied_death_counts] / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
# computes outer products and sums them together.
# Naive approach is to
# 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
# 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
# 3) subtract them, and then sum to (d, d)
# Alternatively, we can sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
#
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
# no tensors here, but do some casting to make it easier in the converging step next.
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
# This is a batch outer product.
            # given a matrix t, for each row, m, compute its outer product: m.dot(m.T), and stack these new matrices together.
# which would be: np.einsum("Bi, Bj->Bij", t, t)
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
pos -= count_of_removals
return hessian, gradient, log_lik
def _partition_by_strata(self, X, T, E, weights, as_dataframes=False):
for stratum, stratified_X in X.groupby(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_dataframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _partition_by_strata_and_apply(self, X, T, E, weights, function, *args):
for (stratified_X, stratified_T, stratified_E, stratified_W), _ in self._partition_by_strata(X, T, E, weights):
yield function(stratified_X, stratified_T, stratified_E, stratified_W, *args)
def _compute_martingale(self, X, T, E, _weights, index=None):
# TODO: _weights unused
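        # Martingale residual per subject, as computed below:
        #   M_i = E_i - (predicted partial hazard_i) * (baseline cumulative hazard at T_i),
        # evaluated per stratum when strata are used.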
partial_hazard = self.predict_partial_hazard(X)[0].values
if not self.strata:
baseline_at_T = self.baseline_cumulative_hazard_.loc[T, "baseline cumulative hazard"].values
else:
baseline_at_T = np.empty(0)
for name, T_ in T.groupby(by=self.strata):
baseline_at_T = np.append(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])
martingale = E - (partial_hazard * baseline_at_T)
return pd.DataFrame(
{self.duration_col: T.values, self.event_col: E.values, "martingale": martingale.values}, index=index
)
def _compute_deviance(self, X, T, E, weights, index=None):
df = self._compute_martingale(X, T, E, weights, index)
rmart = df.pop("martingale")
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
log_term = np.where((E.values - rmart.values) <= 0, 0, E.values * np.log(E.values - rmart.values))
deviance = np.sign(rmart) * np.sqrt(-2 * (rmart + log_term))
df["deviance"] = deviance
return df
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
r"""
Let s_k be the kth schoenfeld residuals. Then E[s_k] = 0.
For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.
Let V_k be the contribution to the information matrix at time t_k. A main result from Grambsch and Therneau is that
\beta(t) = E[s_k*V_k^{-1} + \hat{beta}]
so define s_k^* = s_k*V_k^{-1} + \hat{beta} as the scaled schoenfeld residuals.
We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)
Notes
-------
lifelines does not add the coefficients to the final results, but R does when you call residuals(c, "scaledsch")
"""
n_deaths = self.event_observed.sum()
scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
self.variance_matrix_
)
scaled_schoenfeld_resids.columns = self.params_.index
return scaled_schoenfeld_resids
def _compute_schoenfeld(self, X, T, E, weights, index=None):
        # TODO: should the index be times, i.e. T[E]?
# Assumes sorted on T and on strata
# cluster does nothing to this, as expected.
_, d = X.shape
if self.strata is not None:
schoenfeld_residuals = np.empty((0, d))
for schoenfeld_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_schoenfeld_within_strata
):
schoenfeld_residuals = np.append(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)
else:
schoenfeld_residuals = self._compute_schoenfeld_within_strata(X.values, T.values, E.values, weights.values)
# schoenfeld residuals are only defined for subjects with a non-zero event.
        df = pd.DataFrame(schoenfeld_residuals[E, :], columns=self.params_.index, index=index[E])
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/schools8_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W_0ED20uQKha"
# In this notebook, we fit a hierarchical Bayesian model to the "8 schools" dataset.
# See also https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# + id="HXRokZL1QPvB"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="C5EHDB-rQSIa" colab={"base_uri": "https://localhost:8080/"} outputId="d6d8b024-96ba-4014-97d9-ddef6d88349e"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import theano.tensor as tt
import theano
# #!pip install arviz
import arviz as az
# + id="sKlvHNY6RUaP"
# !mkdir ../figures
# + [markdown] id="-jby_J17HqBT"
# # Data
# + id="8pNC3UANQjeO" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="8f91ec2e-e81b-452b-dcf7-8c9f6ddda82a"
# https://github.com/probml/pyprobml/blob/master/scripts/schools8_pymc3.py
# Data of the Eight Schools Model
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
print(np.mean(y))
print(np.median(y))
names=[];
for t in range(8):
names.append('{}'.format(t));
# Plot raw data
fig, ax = plt.subplots()
y_pos = np.arange(8)
ax.errorbar(y,y_pos, xerr=sigma, fmt='o')
ax.set_yticks(y_pos)
ax.set_yticklabels(names)
ax.invert_yaxis() # labels read top-to-bottom
plt.title('8 schools')
plt.savefig('../figures/schools8_data.png')
plt.show()
# + [markdown] id="vcAdKbnXHsKE"
# # Centered model
# + id="-Lxa_JgfQmAI" colab={"base_uri": "https://localhost:8080/", "height": 723} outputId="573cdde1-a178-4949-de75-af036d02f6dd"
# Centered model
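# The hierarchical model, written out (it matches the code below):
#   y_j ~ Normal(alpha_j, sigma_j) for each school j, with sigma_j treated as known,
#   alpha_j ~ Normal(mu_alpha, sigma_alpha),
#   mu_alpha ~ Normal(0, 5), sigma_alpha ~ HalfCauchy(5).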
with pm.Model() as Centered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with Centered_eight:
trace_centered = pm.sample(1000, chains=4, return_inferencedata=False)
pm.summary(trace_centered).round(2)
# PyMC3 gives multiple warnings about divergences
# Also, see r_hat ~ 1.01, ESS << nchains*1000, especially for sigma_alpha
# We can solve these problems below by using a non-centered parameterization.
# In practice, for this model, the results are very similar.
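# The non-centered version below draws alpha_offset_j ~ Normal(0, 1) and sets
# alpha_j = mu_alpha + sigma_alpha * alpha_offset_j. The implied prior on alpha_j is
# unchanged, but the sampler explores independent standard normals instead of the
# funnel-shaped joint geometry of (mu_alpha, sigma_alpha, alpha), which is what
# fixes the divergences seen here.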
# + id="pOrDPo_lQob_" colab={"base_uri": "https://localhost:8080/"} outputId="0cbd7421-2754-43c2-a468-7250ae30b8d1"
# Display the total number and percentage of divergent transitions
diverging = trace_centered['diverging']
print('Number of Divergent Transitions: {}'.format(diverging.nonzero()[0].size))
diverging_pct = diverging.nonzero()[0].size / diverging.size * 100
print('Percentage of Divergent Transitions: {:.1f}'.format(diverging_pct))
# + id="bYbhbC-kT8GV" outputId="77b27048-57ad-456c-f6ea-7bbeee7d1d94" colab={"base_uri": "https://localhost:8080/"}
dir(trace_centered)
# + id="9ODVo7cLUKs8" outputId="505c9b7c-6b7f-4b12-be22-c67809d19641" colab={"base_uri": "https://localhost:8080/"}
trace_centered.varnames
# + id="gClLFgqHVuW1" outputId="7447a76c-0e85-4d11-ca0a-fd24babe57dd" colab={"base_uri": "https://localhost:8080/", "height": 356}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_centered_acf_combined.png', dpi=300)
# + id="uWPD88BxTkMj" outputId="ed94b053-2ebc-41f1-91c3-12f0d7eec423" colab={"base_uri": "https://localhost:8080/", "height": 452}
with Centered_eight:
#fig, ax = plt.subplots()
az.plot_autocorr(trace_centered, var_names=['mu_alpha', 'sigma_alpha']);
plt.savefig('schools8_centered_acf.png', dpi=300)
# + id="Uv1QEiQOQtGc" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="7ce96252-9002-4f18-a64c-c55046f5415d"
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=True);
plt.savefig('schools8_centered_forest_combined.png', dpi=300)
# + id="cgzmwxVGZxub" outputId="8979ca4c-d9df-43bb-847e-bad33b2258bb" colab={"base_uri": "https://localhost:8080/", "height": 542}
with Centered_eight:
az.plot_forest(trace_centered, var_names="alpha",
hdi_prob=0.95, combined=False);
plt.savefig('schools8_centered_forest.png', dpi=300)
# + [markdown] id="BkphbYr_HxOj"
# # Non-centered
# + id="jLFiQS0ZQvR4" colab={"base_uri": "https://localhost:8080/", "height": 905} outputId="8c0caa4b-4aa4-4685-f8ef-ef23ba60b82c"
# Non-centered parameterization
with pm.Model() as NonCentered_eight:
mu_alpha = pm.Normal('mu_alpha', mu=0, sigma=5)
sigma_alpha = pm.HalfCauchy('sigma_alpha', beta=5)
alpha_offset = pm.Normal('alpha_offset', mu=0, sigma=1, shape=J)
alpha = pm.Deterministic('alpha', mu_alpha + sigma_alpha * alpha_offset)
#alpha = pm.Normal('alpha', mu=mu_alpha, sigma=sigma_alpha, shape=J)
obs = pm.Normal('obs', mu=alpha, sigma=sigma, observed=y)
log_sigma_alpha = pm.Deterministic('log_sigma_alpha', tt.log(sigma_alpha))
np.random.seed(0)
with NonCentered_eight:
trace_noncentered = pm.sample(1000, chains=4)
pm.summary(trace_noncentered).round(2)
# Samples look good: r_hat = 1, ESS ~= nchains*1000
# + id="RyB5Qu-MQxuM" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="4a21b628-5b80-4ae4-a148-a208f33d6d43"
with NonCentered_eight:
az.plot_autocorr(trace_noncentered, var_names=['mu_alpha', 'sigma_alpha'], combined=True);
plt.savefig('schools8_noncentered_acf_combined.png', dpi=300)
# + id="JHmvYgsAQzuK" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="5ed95cc6-49b8-4bc6-acca-59f7c5f5c06b"
with NonCentered_eight:
az.plot_forest(trace_noncentered, var_names="alpha",
combined=True, hdi_prob=0.95);
plt.savefig('schools8_noncentered_forest_combined.png', dpi=300)
# + id="vb8tzwUhXlW0" colab={"base_uri": "https://localhost:8080/", "height": 568} outputId="efad1751-55c1-4d1d-97b8-198f67af8935"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha",
combined=True, hdi_prob=0.95);
plt.axvline(np.mean(y), color='k', linestyle='--')
# + id="JETMmNSuZUV7" colab={"base_uri": "https://localhost:8080/", "height": 647} outputId="835e3d2c-7874-41b5-d22e-d64e18fae9ab"
az.plot_forest([trace_centered, trace_noncentered], model_names=['centered', 'noncentered'],
var_names="alpha", kind='ridgeplot',
combined=True, hdi_prob=0.95);
# + [markdown] id="Q_SYYgL0H13G"
# # Funnel of hell
# + id="E3CtP2kcT4s5" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="17af872c-3d56-48e6-be05-a5aab0b4aa39"
# Plot the "funnel of hell"
# Based on
# https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(trace_centered['mu_alpha'], name='mu_alpha')
y = pd.Series(trace_centered['log_sigma_alpha'], name='log_sigma_alpha')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='log(sigma)');
#axs[0].axhline(0.01)
x = pd.Series(trace_noncentered['mu_alpha'], name='mu')
import pandas as pd
import os
import shutil
import collections
def allFile(path):
res = []
for root, dirs, files in os.walk(path):
for file in files:
res.append(os.path.join(root, file))
return res
def allDir(path):
res = []
for root, dirs, files in os.walk(path):
for Dir in dirs:
res.append(os.path.join(root, Dir))
return res
def selectFiles(files, condition):
newfiles = []
for file in files:
if condition in file:
newfiles.append(file)
# print(file)
return newfiles
def toDF(path):
files = allFile(path)
dfs= pd.DataFrame()
for file in files:
if (".new.xlsx" in file) and ("$" not in file):
df = pd.read_excel(file).iloc[:,0:6]
dfs = dfs.append(df)
return dfs.dropna(axis=0)
def nameRule(name):
return name.replace("(","").replace(")","").replace(" ","").replace("-","").replace("_","")
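# Illustrative example (not from the original script): nameRule('file name_(01)')
# returns 'filename01', so the matching below ignores spaces, brackets, hyphens and underscores.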
def makeFile(files, extension, i, blackList, row, changeLog):
flag = True
change = False
for file in files:
        if nameRule(row.파일명) == nameRule(os.path.basename(file).replace(extension, '')):
flag = False
if file not in blackList:
change = True
blackList.append(file)
justName = file.split(os.path.sep)[-2] + '_' + str(i).zfill(5)
fileName = justName + extension
                changeLog[row.대화번호] = file.split(os.path.sep)[-2]
                changeLog[row.파일명] = justName
fullName = os.path.join(os.path.dirname(file), fileName)
newName = fullName.replace('20200705_Flitto_Rantacar_samples','Filtto_Rantacar')
shutil.copy(file, newName)
break
if flag:
        print(row.파일명 + ' : ' + extension + ' not found')
return [blackList, change, changeLog]
def makeTxt(files, extension, i, blackList, row, en):
flag = True
for file in files:
        if nameRule(row.파일명) == nameRule(os.path.basename(file).replace(extension, '')):
flag = False
if file not in blackList:
blackList.append(file)
fileName = file.split(os.path.sep)[-2] + '_' + str(i).zfill(5)
koName = os.path.join(os.path.dirname(file), fileName + '_ko.txt').replace('20200705_Flitto_Rantacar_samples','Filtto_Rantacar')
enName = os.path.join(os.path.dirname(file), fileName + '_en.txt').replace('20200705_Flitto_Rantacar_samples','Filtto_Rantacar')
shutil.copy(file, koName)
f = open(enName, 'wt')
                f.write(en[str(row.파일명)])
break
if flag:
        print(row.파일명 + ' : ' + extension + ' not found')
return blackList
def makeAll(df):
pcmFiles = selectFiles(allFile(os.getcwd()), '.pcm')
txtFiles = selectFiles(allFile(os.getcwd()), '.txt')
changeLog = collections.defaultdict(str)
i = 0
pcmBlackList = []
txtBlackList = []
en = enDict(df)
for row in df.itertuples():
[pcmBlackList, change, changeLog] = makeFile(pcmFiles, '.pcm', i, pcmBlackList, row, changeLog)
txtBlackList = makeTxt(txtFiles, '.txt', i, txtBlackList, row, en)
if change:
i+=1
return changeLog
def enDict(df):
en = dict()
for row in df.itertuples():
        fn = str(row.파일명)
        txt = str(row.영어)
if fn not in en.keys():
en[fn] = txt
else:
en[fn] += (' ' + txt)
return en
def makeDirs(dirs):
for dr in dirs:
os.makedirs(dr.replace('20200705_Flitto_Rantacar_samples','Filtto_Rantacar'))
def makeNewDf(df, changeLog):
    newDf = pd.DataFrame(columns=['대화번호', '미션 제목', '발화자 구분', '한국어', '영어', '파일명'])
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
import pandas as pd
from pandas import Series, date_range
import pandas._testing as tm
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
        expected = Series(expected_value)
import pandas as pd
import acquire as a
import matplotlib.pyplot as plt
import seaborn as sns
##########################################################################################
# My Prepare Functions
##########################################################################################
def set_index(df, date_col):
'''
Converts column to datetime and sets as the index
'''
df[date_col] = pd.to_datetime(df[date_col])
df = df.set_index(date_col).sort_index()
return df
def visualize(df, x, y, title):
'''
plots a scatter plot of x vs y, and then a pairplot of the complete df
'''
plt.scatter(x=x, y=y)
    plt.title(title)
plt.show()
sns.pairplot(df)
def sales_total(df):
df['sales_total'] = df.sale_amount * df.item_price
return df
def create_date_columns(df, date_types, date_col):
'''
'year','month','day','hour','week','weekday','weekday_name','quarter'
create columns of these date types using date index or column
date_col must be set to a pandas datetime
'''
# if date columns has already been set to index
if date_col == 'index':
for x in date_types:
# will add the date column for every date type in the list
if x == 'year':
df['year'] = df.index.year
if x == 'month':
df['month'] = df.index.month
if x == 'day':
df['day'] = df.index.day
if x == 'hour':
df['hour'] = df.index.hour
if x == 'week':
df['week'] = df.index.week
if x == 'weekday':
df['weekday'] = df.index.weekday
if x == 'weekday_name':
df['weekday_name'] = df.index.day_name()
if x == 'quarter':
df['quarter'] = df.index.quarter
# if date column has not yet been set to index
else:
for x in date_types:
# will add the date column for every date type in the list
if x == 'year':
df['year'] = df[date_col].dt.year
if x == 'month':
df['month'] = df[date_col].dt.month
if x == 'day':
df['day'] = df[date_col].dt.day
if x == 'hour':
df['hour'] = df[date_col].dt.hour
if x == 'week':
df['week'] = df[date_col].dt.week
if x == 'weekday':
df['weekday'] = df[date_col].dt.weekday
if x == 'weekday_name':
df['weekday_name'] = df[date_col].dt.day_name()
if x == 'quarter':
df['quarter'] = df[date_col].dt.quarter
return df
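# Illustrative usage (assumes a dataframe with a 'sale_date' column; not part of the original module):
# df = set_index(df, 'sale_date')
# df = create_date_columns(df, ['year', 'month', 'weekday_name'], 'index')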
def sales_total(df):
'''
creates a new column for sales total
'''
df['sales_total'] = df.sale_amount * df.item_price
return df
##########################################################################################
# Preparation of Zach's Sales Data
##########################################################################################
def prep_sales():
'''
In order to run this function: the function complete_data must have been run.
This function takes output of complete_data from the acquire.py function, preps, and returns the dataframe for exploration.
'''
# Creates dataframe from complete_data function in acquire.py
df = a.complete_data(cached=True)
# sale_date column is converted to datetime and set as the index
df.sale_date = pd.to_datetime(df.sale_date)
df.set_index(df.sale_date, inplace=True)
# Create the columns 'month' and 'day_of_week'
df['month'] = df.index.month
df['day_of_week'] = df.index.day_name()
# Create 'sale_total' column
df['sales_total'] = df.sale_amount * df.item_price
return df
##########################################################################################
# Preparation of Germany Energy Consumption Data
##########################################################################################
def prep_germany(cached=False):
'''
This function pulls and preps the Germany Energy Consumption dataframe for exploration
if cached == False: collects the csv from the url
if cached == True: pulls the already saved dataframe
'''
if cached == False:
# url to opsd_germany_daily.csv
url = 'https://raw.githubusercontent.com/jenfly/opsd/master/opsd_germany_daily.csv'
# uses pull_csv function from acquire.py to collect the dataset
df = a.pull_csv(url)
# caches the dataset as a csv
        df.to_csv('opsd_germany_daily.csv')
# cached == True
else:
        # reads the previously cached csv from disk
df = pd.read_csv('opsd_germany_daily.csv')
# Lowercases the columns and renames 'wind+solar' columns to 'wind_and_solar'
df.columns = df.columns.str.lower()
df.rename(columns={'wind+solar': 'wind_and_solar'}, inplace=True)
    # Convert date to datetime and set date as index
df.date = pd.to_datetime(df.date)
df.set_index(df.date, inplace=True)
# Creates the month and year columns
df['month'] = df.index.month
df['year'] = df.index.year
# Fills nulls with 0
df.fillna(0, inplace=True)
return df
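# Illustrative usage (assumed, not part of the original module):
# germany = prep_germany(cached=True)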
##########################################################################################
# Zero's and NULLs
##########################################################################################
#----------------------------------------------------------------------------------------#
###### Easley
def prep_store_data(df):
    df.sale_date = pd.to_datetime(df.sale_date, format='%a, %d %b %Y %H:%M:%S %Z')
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.metrics import accuracy_score
# Import the data
train = pd.read_csv('./data/train.csv')
test = pd.read_csv('./data/test.csv')
# Process the data
train['Sex'] = train['Sex'].map({'female': 1, 'male': 0})
train['Embarked'] = train['Embarked'].map({'S': 0, 'C': 1, 'Q': 3})
df = pd.DataFrame(train['Name'].str.split(',').tolist())
df2 = pd.DataFrame(df[1].str.split('.').tolist())
train['Title'] = df2[0]
labels, levels = pd.factorize(train['Title'])
titles = pd.DataFrame(labels)
train['Title'] = titles
train = train.drop(['Name', 'Cabin', 'Ticket'], 1)
test['Sex'] = test['Sex'].map({'female': 1, 'male': 0})
test['Embarked'] = test['Embarked'].map({'S': 0, 'C': 1, 'Q': 3})
tdf = pd.DataFrame(test['Name'].str.split(',').tolist())
tdf2 = pd.DataFrame(tdf[1].str.split('.').tolist())
test['Title'] = tdf2[0]
tlabels, levels = pd.factorize(test['Title'])
ttitles = pd.DataFrame(tlabels)
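# The script stops after feature processing. A minimal sketch of the modelling step the unused
# imports suggest (assumes the standard Kaggle 'Survived'/'PassengerId' columns; kept commented
# out because it is not part of the original script):
# test['Title'] = ttitles
# test = test.drop(['Name', 'Cabin', 'Ticket'], 1)
# X = train.drop(['Survived', 'PassengerId'], 1).fillna(-1)
# y = train['Survived']
# X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
# model = XGBClassifier()
# model.fit(X_tr, y_tr)
# print(accuracy_score(y_val, model.predict(X_val)))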
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
            Timestamp(x)
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.types.common import (is_integer,
is_float,
is_object_dtype,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_int64,
_ensure_object)
from pandas.types.dtypes import PeriodDtype
from pandas.types.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
from pandas.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.lib import infer_dtype
import pandas.tslib as tslib
from pandas.compat import zip, u
import pandas.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self._values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
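# Illustrative helper (added for clarity; not part of the original module):
# the properties generated by _field_accessor expose period fields such as
# year and quarter as integer arrays. A rough public-API equivalent, with
# illustrative values:
def _demo_field_accessors():
    import pandas as pd
    idx = pd.period_range("2020-01", periods=3, freq="M")
    return list(idx.year), list(idx.quarter)  # ([2020, 2020, 2020], [1, 1, 1])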
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
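# Illustrative helper (added for clarity; not part of the original module):
# dt64arr_to_periodarr backs conversions such as the one sketched below, where
# each datetime64 value is mapped onto the period that contains it. The
# example values are assumptions.
def _demo_datetime_to_period():
    import pandas as pd
    dti = pd.to_datetime(["2020-01-15", "2020-02-20"])
    return dti.to_period("M")  # PeriodIndex(['2020-01', '2020-02'], freq='M')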
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self._values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self._values, opname)(other._values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
func = getattr(self._values, opname)
result = func(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return wrapper
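# Illustrative helper (added for clarity; not part of the original module):
# the wrapped comparison operators compare a PeriodIndex element-wise against
# a Period of the same frequency; mismatched frequencies raise
# IncompatibleFrequency. A small public-API sketch with assumed values:
def _demo_period_comparison():
    import pandas as pd
    idx = pd.period_range("2020-01", periods=3, freq="M")
    return list(idx > pd.Period("2020-01", freq="M"))  # [False, True, True]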
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, dtype=None,
**kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=copy)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
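# Illustrative helper (added for clarity; not part of the original module):
# _generate_range is the path that pd.period_range drives; a regular index can
# be built either from endpoints or from a start plus a period count, but not
# from field arrays and endpoints at the same time. Values are illustrative.
def _demo_period_range():
    import pandas as pd
    from_endpoints = pd.period_range(start="2020-01", end="2020-04", freq="M")
    from_count = pd.period_range(start="2020-01", periods=4, freq="M")
    return from_endpoints.equals(from_count)  # True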
@classmethod
def _from_arraylike(cls, data, freq, tz):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = _ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq) for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = _ensure_object(data)
if freq is None:
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data._values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._values,
base1, base2, 1)
else:
if is_object_dtype(data):
inferred = infer_dtype(data)
if inferred == 'integer':
data = data.astype(np.int64)
if freq is None and is_object_dtype(data):
# must contain Period instance and thus extract ordinals
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
if freq is None:
msg = 'freq not specified and cannot be inferred'
raise ValueError(msg)
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
data = _ensure_object(data)
data = period.extract_ordinals(data, freq)
return data, freq
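# Illustrative helper (added for clarity; not part of the original module):
# _from_arraylike accepts period-like strings, Period objects or datetime64
# data; with strings, an explicit freq resolves each value to an ordinal. The
# constructor call below is a public-API sketch with assumed values.
def _demo_from_strings():
    import pandas as pd
    return pd.PeriodIndex(["2020-01", "2020-02"], freq="M")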
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if (len(values) > 0 and is_float_dtype(values)):
raise TypeError("PeriodIndex can't take floats")
else:
return cls(values, name=name, freq=freq, **kwargs)
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
if values is None:
values = self._values
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
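# Illustrative helper (added for clarity; not part of the original module):
# membership checks resolve a Period (or a parseable string) against the
# index, and a Period with a different frequency is simply reported as absent.
# Values below are assumptions for the example.
def _demo_membership():
    import pandas as pd
    idx = pd.period_range("2020-01", periods=3, freq="M")
    return (pd.Period("2020-02", freq="M") in idx,  # True
            pd.Period("2020-02", freq="D") in idx)  # False: frequency mismatch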
@property
def asi8(self):
return self._values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.asobject.values
@property
def _values(self):
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.asobject.values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._values[mask].searchsorted(where_idx._values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._values < self._values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
if copy:
return self._int64index.copy()
else:
return self._int64index
elif is_datetime64_dtype(dtype):
return self.to_timestamp(how=how)
elif is_datetime64tz_dtype(dtype):
return self.to_timestamp(how=how).tz_localize(dtype.tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
raise TypeError('Cannot cast PeriodIndex to dtype %s' % dtype)
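# Illustrative helper (added for clarity; not part of the original module):
# common conversions that exercise the astype branches above, expressed via
# the public API. Names and values are illustrative assumptions.
def _demo_periodindex_astype():
    import pandas as pd
    idx = pd.period_range("2020-01", periods=3, freq="M")
    as_object = idx.astype(object)                 # Index of Period objects
    as_timestamps = idx.to_timestamp(how="start")  # DatetimeIndex of period starts
    return as_object, as_timestamps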
import numpy as np
from pandas import (
DataFrame,
Index,
RangeIndex,
Series,
)
import pandas._testing as tm
# -----------------------------------------------------------------------------
# Copy/view behaviour for the values that are set in a DataFrame
def test_set_column_with_array():
# Case: setting an array as a new column (df[col] = arr) copies that data
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
arr = np.array([1, 2, 3], dtype="int64")
df["c"] = arr
# the array data is copied
assert not np.shares_memory(df["c"].values, arr)
# and thus modifying the array does not modify the DataFrame
arr[0] = 0
tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
def test_set_column_with_series(using_copy_on_write):
# Case: setting a series as a new column (df[col] = s) copies that data
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ser = Series([1, 2, 3])
df["c"] = ser
if using_copy_on_write:
# with CoW we can delay the copy
assert np.shares_memory(df["c"].values, ser.values)
else:
# the series data is copied
assert not np.shares_memory(df["c"].values, ser.values)
# and modifying the series does not modify the DataFrame
ser.iloc[0] = 0
assert ser.iloc[0] == 0
tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
def test_set_column_with_index(using_copy_on_write):
# Case: setting an index as a new column (df[col] = idx) copies that data
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
idx = Index([1, 2, 3])
df["c"] = idx
# the index data is copied
assert not np.shares_memory(df["c"].values, idx.values)
# and thus modifying the index does not modify the DataFrame
idx.values[0] = 0
tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
# however, in case of a RangeIndex, we currently don't copy the cached
# "materialized" values
idx = RangeIndex(1, 4)
arr = idx.values
df["d"] = idx
if using_copy_on_write:
assert not np.shares_memory(df["d"].values, arr)
arr[0] = 0
tm.assert_series_equal(df["d"], Series([1, 2, 3], name="d"))
else:
assert np.shares_memory(df["d"].values, arr)
arr[0] = 0
tm.assert_series_equal(df["d"], Series([0, 2, 3], name="d"))
def test_set_columns_with_dataframe(using_copy_on_write):
# Case: setting a DataFrame as new columns copies that data
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
df[["c", "d"]] = df2
if using_copy_on_write:
# with CoW we can delay the copy
assert np.shares_memory(df["c"].values, df2["c"].values)
else:
# the data is copied
assert not np.shares_memory(df["c"].values, df2["c"].values)
# and modifying the set DataFrame does not modify the original DataFrame
df2.iloc[0, 0] = 0
tm.assert_series_equal(df["c"], Series([7, 8, 9], name="c"))