prompt: string (lengths 19 to 1.03M)
completion: string (lengths 4 to 2.12k)
api: string (lengths 8 to 90)
"""functions for generating fake formatted output, for a dummy API.""" import spotipy import spotipy.util as util from spotipy.oauth2 import SpotifyClientCredentials import spotipy.oauth2 as oauth2 from decouple import config import pandas as pd SPOTIFY_CLIENT_ID = config('SPOTIFY_CLIENT_ID') SPOTIFY_CLIENT_SECRET = config('SPOTIFY_CLIENT_SECRET') credentials = oauth2.SpotifyClientCredentials( client_id=SPOTIFY_CLIENT_ID, client_secret=SPOTIFY_CLIENT_SECRET) token = credentials.get_access_token() spotify = spotipy.Spotify(auth=token) def get_ten_tracks(): """gets ten tracks from the spotify API, in the format they'll be served to WEB.""" artist_name = [] track_name = [] #popularity = [] track_id = [] track_results = spotify.search(q='year:2018 AND tag:hipster', limit=10, offset=0, market='US') for i, t in enumerate(track_results['tracks']['items']): artist_name.append(t['artists'][0]['name']) track_name.append(t['name']) track_id.append(t['id']) #popularity.append(t['popularity']) df_tracks =
pd.DataFrame({'artist_name':artist_name,'track_name':track_name,'track_id':track_id})
pandas.DataFrame
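A minimal, self-contained sketch of the pd.DataFrame call this completion makes, using made-up track data in place of the live Spotify results (all names below are hypothetical stand-ins):

import pandas as pd

# Stand-ins for the lists the script fills from the Spotify search results.
artist_name = ["Artist A", "Artist B"]
track_name = ["Track 1", "Track 2"]
track_id = ["id_a", "id_b"]

df_tracks = pd.DataFrame(
    {"artist_name": artist_name, "track_name": track_name, "track_id": track_id}
)
print(df_tracks.shape)  # (2, 3): one row per track, one column per list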
import numpy as np import scipy.stats as sps import pandas as pd from tqdm import tqdm_notebook from collections import defaultdict from functools import partial import itertools import matplotlib.pyplot as plt import matplotlib.colors import mpl_toolkits.mplot3d as plt3d import seaborn as sns from abc import ABC, abstractmethod class ExperimentHandler: """ Class that carries out the experiments described above. The interface consists of a single function, run, which returns the p-values before and after correction (by each of the methods), as well as the quantitative measures of classification quality: FDR, FWER and power. """ def __init__(self, theta_list, Sigma, cov_mx_description, alpha, sample_size, n_runs, random_seed, criterion, correction_methods, compute_fwer, compute_fdr, compute_power): """ :param theta_list: \theta_0 and \theta_1 from the description :param Sigma: the covariance matrix :param cov_mx_description: its description :param alpha: significance level :param n_runs: number of experiment iterations :param sample_size: sample size at each generation step :param random_seed: for reproducibility of the results :param correction_methods: the multiple-testing correction methods to use :param criterion: the statistical test to use :param compute_X: function that computes FWER | FDR | power """ self.theta_list = theta_list self.Sigma = Sigma self.cov_mx_description = cov_mx_description self.alpha = alpha self.n_runs = n_runs self.sample_size = sample_size self.random_seed = random_seed self.correction_methods = correction_methods self.criterion = criterion self._compute_fwer = compute_fwer self._compute_fdr = compute_fdr self._compute_power = compute_power def _theta_from_config(self, alt_mask): """ Builds the mean vector theta from the known mask alt_mask, which encodes the configuration: if position i is True, that component is taken from theta_1, otherwise from theta_0. """ theta = self.theta_list[0].copy() theta[alt_mask] = self.theta_list[1][alt_mask] return theta def _sample(self, mean_vec, shape): """ Sample random vectors from N(mean_vec, Sigma) for all experiments at once, i.e. the output should be a tensor whose dimensions are given by the shape argument. :return: the tensor of samples and the distribution they were drawn from """ rv = sps.multivariate_normal( mean=mean_vec, cov=self.Sigma, allow_singular=True, seed=self.random_seed ) samples = rv.rvs(size=shape) assert samples.shape == (*np.atleast_1d(shape), 3), \ "Incorrect shape of the samples tensor." \ f"Expected {(*np.atleast_1d(shape), 3)}, " \ f"but trying to return {samples.shape}" return samples, rv def _test_hypotheses(self, samples): """ :param samples: samples generated in this experiment under one particular configuration. A 3D tensor of shape (n_runs, sample_size, 3). :return: pvalues, the matrix of p-values for all three hypotheses at once. Has shape (n_runs, 3) """ assert samples.shape == (self.n_runs, self.sample_size, 3), \ "Incorrect shape of the samples tensor." \ f"Expected {(self.n_runs, self.sample_size, 3)}, " \ f"but {samples.shape} was passed" pvalues = self.criterion( samples, theta_0=self.theta_list[0], Sigma=self.Sigma ) assert np.all((0 <= pvalues) & (pvalues <= 1)), \ "Some of the p-values are not in the range [0, 1]" assert pvalues.shape == (self.n_runs, 3), \ "Incorrect shape of the pvalues matrix." \ f"Expected {(self.n_runs, 3)}, but got {pvalues.shape}" return pvalues def _plot_density(self, alt_mask, ax, n_pts=1000): """ Draw a 3D plot of the sampled points, colored according to the theoretical density. 
""" samples, rv = {}, {} theta_0 = self.theta_list[0] samples["theta_0"], rv["theta_0"] = self._sample(theta_0, shape=n_pts) theta = self._theta_from_config(alt_mask) samples["theta"], rv["theta"] = self._sample(theta, shape=n_pts) colors = {"theta_0": "#00CC66", "theta": "#FF3300"} for mean_annot in ["theta_0", "theta"]: curr_sample = samples[mean_annot] ax.scatter( curr_sample[:, 0], curr_sample[:, 1], curr_sample[:, 2], c=colors[mean_annot], alpha=0.1 ) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_zticklabels([]) def _plot_marginal_densities(self, alt_mask, ax, cmap, n_pts=1000): sns.set(font_scale=1.5) h0_samples, h0_rv = self._sample(self.theta_list[0], shape=n_pts) theta = self._theta_from_config(alt_mask) samples, rv = self._sample(theta, shape=n_pts) samples_df = pd.DataFrame(np.ravel(samples), columns=["Значение"]) samples_df["Распределение"] = np.full( samples_df.shape[0], r"$\mathcal{N}(\theta, \Sigma)$" ) h0_samples_df = pd.DataFrame(np.ravel(h0_samples), columns=["Значение"]) h0_samples_df["Распределение"] = np.full( h0_samples_df.shape[0], r"$\mathcal{N}(\theta_0, \Sigma)$" ) joint_samples_df =
pd.concat([h0_samples_df, samples_df])
pandas.concat
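The completion stacks two labelled sample frames row-wise. A small sketch of that pd.concat pattern, with tiny made-up frames and English column names standing in for the originals:

import numpy as np
import pandas as pd

# Two frames with identical columns, as in the marginal-density plot above.
h0_samples_df = pd.DataFrame({"value": np.zeros(3)})
h0_samples_df["distribution"] = "H0"
samples_df = pd.DataFrame({"value": np.ones(3)})
samples_df["distribution"] = "H1"

# Row-wise concatenation; ignore_index=True gives a clean 0..n-1 index.
joint_samples_df = pd.concat([h0_samples_df, samples_df], ignore_index=True)
print(joint_samples_df)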
# this script calculates occurrence percentages across inputs and functions. # occurrence percentage: # number of occurrences of model component/sum of all occurrences of all model components import pickle import os import operator import pandas as pd import numpy as np from collections import Counter dir_path = os.path.dirname(os.path.realpath(__file__)) file_path = os.path.join(dir_path,'sc_all_results.pickle') with open(file_path,'rb') as f: data = pickle.load(f) funcs = ['lt','ite','vadd','vsub','vmul','vdiv','vneg','vsin','vcos'] inputs = data['seed_0_results']['inputs'] func_list = [] input_list = [] term_list = [] func_dicts = {} input_dicts = {} training_error = [] test_error = [] complexity = [] for i in data: for j,tree in enumerate(data[i]['labels']): # # print(tree) # a tree training_error.append(data[i]['train_mse'][j]) test_error.append(data[i]['test_mse'][j]) complexity.append(data[i]['complexity'][j]) model_id = 'seed_{0}_model_{1}'.format(i[5:7],j) print(model_id) # model_terms = [] model_funcs = [] model_inputs = [] for k in tree: for l in k: if l in funcs: func_list.append(l) model_funcs.append(l) elif l in inputs: input_list.append(l) model_inputs.append(l) else: term_list.append(float(l)) func_counts = [] input_counts = [] for l in funcs: if l in model_funcs: func_counts.append(model_funcs.count(l)) else: func_counts.append(0) for l in inputs: if l in model_inputs: input_counts.append(model_inputs.count(l)) else: input_counts.append(0) if np.sum(input_counts) == 0: input_dicts[model_id] = np.zeros((len(inputs),)) else: input_dicts[model_id] = np.divide(input_counts,np.sum(input_counts)) if np.sum(func_counts) == 0: func_dicts[model_id] = np.zeros((len(funcs),)) else: func_dicts[model_id] = np.divide(func_counts,np.sum(func_counts)) input_df = pd.DataFrame(input_dicts,index=inputs) func_df = pd.DataFrame(func_dicts,index=funcs) print(input_df) print(func_df) frames = [input_df,func_df] infn_df =
pd.concat(frames)
pandas.concat
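Here pd.concat stacks the input-frequency table on top of the function-frequency table: the two frames share their columns (model ids) but carry disjoint index labels. A sketch of that behaviour with toy data (names are illustrative only):

import pandas as pd

# Columns are model ids; index labels come from two different vocabularies.
input_df = pd.DataFrame({"seed_0_model_0": [0.5, 0.5]}, index=["x0", "x1"])
func_df = pd.DataFrame({"seed_0_model_0": [0.25, 0.75]}, index=["vadd", "vmul"])

# Row-wise concat keeps both index blocks in one occurrence-percentage table.
infn_df = pd.concat([input_df, func_df])
print(infn_df)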
import pandas as pd import tkinter from tkinter import filedialog import os # Import the Excel files # Add a match-flag column ('标记' = "flag") to the table that needs to be matched print('请选择完整名单所在的.xlsx文件。 Please select the .xlsx file where the Full List is located.\n') root1 = tkinter.Tk() root1.withdraw() filePath1 = filedialog.askopenfilename() excel_one = pd.read_excel(filePath1,header=None,names=['名称']) print('请选择可能缺失人员名单所在的.xlsx文件。 Please select the .xlsx file where the MoYu List is located.\n') root2 = tkinter.Tk() root2.withdraw() filePath2 = filedialog.askopenfilename() excel_two = pd.read_excel(filePath2,header=None,names=['名称','标记']) # Fill in the match-flag value ('存在' = "present") excel_two['标记'] = '存在' # Match the data of the two tables on the name column ('名称') results =
pd.merge(excel_one,excel_two,how='left',on='名称')
pandas.merge
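The completion is a left join: every name from the full list is kept, and names absent from the second sheet end up with NaN in the flag column, which is how the missing people are found. A minimal sketch with in-memory frames instead of the Excel files (column names are English stand-ins):

import pandas as pd

full_list = pd.DataFrame({"name": ["Alice", "Bob", "Carol"]})
checked_in = pd.DataFrame({"name": ["Alice", "Carol"]})
checked_in["flag"] = "present"

# how='left' keeps every row of full_list; unmatched names get NaN in "flag".
results = pd.merge(full_list, checked_in, how="left", on="name")
print(results[results["flag"].isna()])  # the names missing from the second list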
import pandas as pd from pandas.testing import assert_series_equal, assert_frame_equal from ..cheval import LinkedDataFrame vehicles_data = { 'household_id': [0, 0, 1, 2, 3], 'vehicle_id': [0, 1, 0, 0, 0], 'manufacturer': ['Honda', 'Ford', 'Ford', 'Toyota', 'Honda'], 'model_year': [2009, 2005, 2015, 2011, 2013], 'km_travelled': [103236, 134981, 19015, 75795, 54573] } households_data = { 'household_id': [0, 1, 2, 3], 'dwelling_type': ['house', 'apartment', 'house', 'house'], 'drivers': [4, 1, 2, 3] } def test_link_to(): vehicles = LinkedDataFrame(vehicles_data) households = LinkedDataFrame(households_data) vehicles.link_to(households, 'household', on='household_id') households.link_to(vehicles, 'vehicles', on='household_id') test_result = households.vehicles.sum("km_travelled") expected_result = pd.Series({0: 238217, 1: 19015, 2: 75795, 3: 54573}) assert_series_equal(test_result, expected_result) def test_slicing(): vehicles = LinkedDataFrame(vehicles_data) households = LinkedDataFrame(households_data) vehicles.link_to(households, 'household', on='household_id') households.link_to(vehicles, 'vehicles', on='household_id') mask = vehicles['household_id'] == 0 vehicles_subset = vehicles.loc[mask].copy() vehicles_subset['dwelling_type'] = vehicles_subset.household.dwelling_type test_result = vehicles_subset['dwelling_type'] expected_result =
pd.Series({0: 'house', 1: 'house'}, name='dwelling_type')
pandas.Series
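The expected value here is a named pd.Series keyed by the surviving row labels. A short sketch of how such a Series compares against a DataFrame column with assert_series_equal (plain pandas only; LinkedDataFrame is not used in this sketch):

import pandas as pd
from pandas.testing import assert_series_equal

vehicles_subset = pd.DataFrame({"dwelling_type": ["house", "house"]}, index=[0, 1])

expected_result = pd.Series({0: "house", 1: "house"}, name="dwelling_type")

# Passes: same values, same index, same name.
assert_series_equal(vehicles_subset["dwelling_type"], expected_result)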
from datetime import timedelta from functools import partial import itertools from parameterized import parameterized import numpy as np from numpy.testing import assert_array_equal, assert_almost_equal import pandas as pd from toolz import merge from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor from zipline.pipeline.common import ( EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME, ) from zipline.pipeline.data import DataSet from zipline.pipeline.data import Column from zipline.pipeline.domain import EquitySessionDomain from zipline.pipeline.loaders.earnings_estimates import ( NextEarningsEstimatesLoader, NextSplitAdjustedEarningsEstimatesLoader, normalize_quarters, PreviousEarningsEstimatesLoader, PreviousSplitAdjustedEarningsEstimatesLoader, split_normalized_quarters, ) from zipline.testing.fixtures import ( WithAdjustmentReader, WithTradingSessions, ZiplineTestCase, ) from zipline.testing.predicates import assert_equal from zipline.testing.predicates import assert_frame_equal from zipline.utils.numpy_utils import datetime64ns_dtype from zipline.utils.numpy_utils import float64_dtype import pytest class Estimates(DataSet): event_date = Column(dtype=datetime64ns_dtype) fiscal_quarter = Column(dtype=float64_dtype) fiscal_year = Column(dtype=float64_dtype) estimate = Column(dtype=float64_dtype) class MultipleColumnsEstimates(DataSet): event_date = Column(dtype=datetime64ns_dtype) fiscal_quarter = Column(dtype=float64_dtype) fiscal_year = Column(dtype=float64_dtype) estimate1 = Column(dtype=float64_dtype) estimate2 = Column(dtype=float64_dtype) def QuartersEstimates(announcements_out): class QtrEstimates(Estimates): num_announcements = announcements_out name = Estimates return QtrEstimates def MultipleColumnsQuartersEstimates(announcements_out): class QtrEstimates(MultipleColumnsEstimates): num_announcements = announcements_out name = Estimates return QtrEstimates def QuartersEstimatesNoNumQuartersAttr(num_qtr): class QtrEstimates(Estimates): name = Estimates return QtrEstimates def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date): """ Given a list of tuples of new data we get for each sid on each critical date (when information changes), create a DataFrame that fills that data through a date range ending at `end_date`. """ df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"]) df = df.pivot_table( columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False ) df = df.reindex(pd.date_range(start_date, end_date)) # Index name is lost during reindex. df.index = df.index.rename("knowledge_date") df["at_date"] = end_date.tz_localize("utc") df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill() new_sids = set(sids) - set(df.columns) df = df.reindex(columns=df.columns.union(new_sids)) return df class WithEstimates(WithTradingSessions, WithAdjustmentReader): """ ZiplineTestCase mixin providing cls.loader and cls.events as class level fixtures. Methods ------- make_loader(events, columns) -> PipelineLoader Method which returns the loader to be used throughout tests. events : pd.DataFrame The raw events to be used as input to the pipeline loader. columns : dict[str -> str] The dictionary mapping the names of BoundColumns to the associated column name in the events DataFrame. make_columns() -> dict[BoundColumn -> str] Method which returns a dictionary of BoundColumns mapped to the associated column names in the raw data. 
""" # Short window defined in order for test to run faster. START_DATE = pd.Timestamp("2014-12-28", tz="utc") END_DATE = pd.Timestamp("2015-02-04", tz="utc") @classmethod def make_loader(cls, events, columns): raise NotImplementedError("make_loader") @classmethod def make_events(cls): raise NotImplementedError("make_events") @classmethod def get_sids(cls): return cls.events[SID_FIELD_NAME].unique() @classmethod def make_columns(cls): return { Estimates.event_date: "event_date", Estimates.fiscal_quarter: "fiscal_quarter", Estimates.fiscal_year: "fiscal_year", Estimates.estimate: "estimate", } def make_engine(self, loader=None): if loader is None: loader = self.loader return SimplePipelineEngine( lambda x: loader, self.asset_finder, default_domain=EquitySessionDomain( self.trading_days, self.ASSET_FINDER_COUNTRY_CODE, ), ) @classmethod def init_class_fixtures(cls): cls.events = cls.make_events() cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids() cls.ASSET_FINDER_EQUITY_SYMBOLS = [ "s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS ] # We need to instantiate certain constants needed by supers of # `WithEstimates` before we call their `init_class_fixtures`. super(WithEstimates, cls).init_class_fixtures() cls.columns = cls.make_columns() # Some tests require `WithAdjustmentReader` to be set up by the time we # make the loader. cls.loader = cls.make_loader( cls.events, {column.name: val for column, val in cls.columns.items()} ) class WithOneDayPipeline(WithEstimates): """ ZiplineTestCase mixin providing cls.events as a class level fixture and defining a test for all inheritors to use. Attributes ---------- events : pd.DataFrame A simple DataFrame with columns needed for estimates and a single sid and no other data. Tests ------ test_wrong_num_announcements_passed() Tests that loading with an incorrect quarter number raises an error. test_no_num_announcements_attr() Tests that the loader throws an AssertionError if the dataset being loaded has no `num_announcements` attribute. """ @classmethod def make_columns(cls): return { MultipleColumnsEstimates.event_date: "event_date", MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter", MultipleColumnsEstimates.fiscal_year: "fiscal_year", MultipleColumnsEstimates.estimate1: "estimate1", MultipleColumnsEstimates.estimate2: "estimate2", } @classmethod def make_events(cls): return pd.DataFrame( { SID_FIELD_NAME: [0] * 2, TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-10"), pd.Timestamp("2015-01-20"), ], "estimate1": [1.0, 2.0], "estimate2": [3.0, 4.0], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: [2015, 2015], } ) @classmethod def make_expected_out(cls): raise NotImplementedError("make_expected_out") @classmethod def init_class_fixtures(cls): super(WithOneDayPipeline, cls).init_class_fixtures() cls.sid0 = cls.asset_finder.retrieve_asset(0) cls.expected_out = cls.make_expected_out() def test_load_one_day(self): # We want to test multiple columns dataset = MultipleColumnsQuartersEstimates(1) engine = self.make_engine() results = engine.run_pipeline( Pipeline({c.name: c.latest for c in dataset.columns}), start_date=pd.Timestamp("2015-01-15", tz="utc"), end_date=pd.Timestamp("2015-01-15", tz="utc"), ) assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1)) class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase): """ Tests that previous quarter loader correctly breaks if an incorrect number of quarters is passed. 
""" @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @classmethod def make_expected_out(cls): return pd.DataFrame( { EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"), "estimate1": 1.0, "estimate2": 3.0, FISCAL_QUARTER_FIELD_NAME: 1.0, FISCAL_YEAR_FIELD_NAME: 2015.0, }, index=pd.MultiIndex.from_tuples( ((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),) ), ) class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase): """ Tests that next quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @classmethod def make_expected_out(cls): return pd.DataFrame( { EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"), "estimate1": 2.0, "estimate2": 4.0, FISCAL_QUARTER_FIELD_NAME: 2.0, FISCAL_YEAR_FIELD_NAME: 2015.0, }, index=pd.MultiIndex.from_tuples( ((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),) ), ) dummy_df = pd.DataFrame( {SID_FIELD_NAME: 0}, columns=[ SID_FIELD_NAME, TS_FIELD_NAME, EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, "estimate", ], index=[0], ) class WithWrongLoaderDefinition(WithEstimates): """ ZiplineTestCase mixin providing cls.events as a class level fixture and defining a test for all inheritors to use. Attributes ---------- events : pd.DataFrame A simple DataFrame with columns needed for estimates and a single sid and no other data. Tests ------ test_wrong_num_announcements_passed() Tests that loading with an incorrect quarter number raises an error. test_no_num_announcements_attr() Tests that the loader throws an AssertionError if the dataset being loaded has no `num_announcements` attribute. """ @classmethod def make_events(cls): return dummy_df def test_wrong_num_announcements_passed(self): bad_dataset1 = QuartersEstimates(-1) bad_dataset2 = QuartersEstimates(-2) good_dataset = QuartersEstimates(1) engine = self.make_engine() columns = { c.name + str(dataset.num_announcements): c.latest for dataset in (bad_dataset1, bad_dataset2, good_dataset) for c in dataset.columns } p = Pipeline(columns) err_msg = ( r"Passed invalid number of quarters -[0-9],-[0-9]; " r"must pass a number of quarters >= 0" ) with pytest.raises(ValueError, match=err_msg): engine.run_pipeline( p, start_date=self.trading_days[0], end_date=self.trading_days[-1], ) def test_no_num_announcements_attr(self): dataset = QuartersEstimatesNoNumQuartersAttr(1) engine = self.make_engine() p = Pipeline({c.name: c.latest for c in dataset.columns}) with pytest.raises(AttributeError): engine.run_pipeline( p, start_date=self.trading_days[0], end_date=self.trading_days[-1], ) class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase): """ Tests that previous quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase): """ Tests that next quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) options = [ "split_adjustments_loader", "split_adjusted_column_names", "split_adjusted_asof", ] class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase): """ Test class that tests that loaders break correctly when incorrectly instantiated. 
Tests ----- test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader) A test that checks that the loader correctly breaks when an unexpected column is passed in the list of split-adjusted columns. """ @classmethod def init_class_fixtures(cls): super(WithEstimates, cls).init_class_fixtures() @parameterized.expand( itertools.product( ( NextSplitAdjustedEarningsEstimatesLoader, PreviousSplitAdjustedEarningsEstimatesLoader, ), ) ) def test_extra_splits_columns_passed(self, loader): columns = { Estimates.event_date: "event_date", Estimates.fiscal_quarter: "fiscal_quarter", Estimates.fiscal_year: "fiscal_year", Estimates.estimate: "estimate", } with pytest.raises(ValueError): loader( dummy_df, {column.name: val for column, val in columns.items()}, split_adjustments_loader=self.adjustment_reader, split_adjusted_column_names=["estimate", "extra_col"], split_adjusted_asof=pd.Timestamp("2015-01-01"), ) class WithEstimatesTimeZero(WithEstimates): """ ZiplineTestCase mixin providing cls.events as a class level fixture and defining a test for all inheritors to use. Attributes ---------- cls.events : pd.DataFrame Generated dynamically in order to test inter-leavings of estimates and event dates for multiple quarters to make sure that we select the right immediate 'next' or 'previous' quarter relative to each date - i.e., the right 'time zero' on the timeline. We care about selecting the right 'time zero' because we use that to calculate which quarter's data needs to be returned for each day. Methods ------- get_expected_estimate(q1_knowledge, q2_knowledge, comparable_date) -> pd.DataFrame Retrieves the expected estimate given the latest knowledge about each quarter and the date on which the estimate is being requested. If there is no expected estimate, returns an empty DataFrame. Tests ------ test_estimates() Tests that we get the right 'time zero' value on each day for each sid and for each column. """ # Shorter date range for performance END_DATE = pd.Timestamp("2015-01-28", tz="utc") q1_knowledge_dates = [ pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-04"), pd.Timestamp("2015-01-07"), pd.Timestamp("2015-01-11"), ] q2_knowledge_dates = [ pd.Timestamp("2015-01-14"), pd.Timestamp("2015-01-17"), pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-23"), ] # We want to model the possibility of an estimate predicting a release date # that doesn't match the actual release. This could be done by dynamically # generating more combinations with different release dates, but that # significantly increases the amount of time it takes to run the tests. # These hard-coded cases are sufficient to know that we can update our # beliefs when we get new information. q1_release_dates = [ pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-14"), ] # One day late q2_release_dates = [ pd.Timestamp("2015-01-25"), # One day early pd.Timestamp("2015-01-26"), ] @classmethod def make_events(cls): """ In order to determine which estimate we care about for a particular sid, we need to look at all estimates that we have for that sid and their associated event dates. We define q1 < q2, and thus event1 < event2 since event1 occurs during q1 and event2 occurs during q2 and we assume that there can only be 1 event per quarter. We assume that there can be multiple estimates per quarter leading up to the event. We assume that estimates will not surpass the relevant event date. 
We will look at 2 estimates for an event before the event occurs, since that is the simplest scenario that covers the interesting edge cases: - estimate values changing - a release date changing - estimates for different quarters interleaving Thus, we generate all possible inter-leavings of 2 estimates per quarter-event where estimate1 < estimate2 and all estimates are < the relevant event and assign each of these inter-leavings to a different sid. """ sid_estimates = [] sid_releases = [] # We want all permutations of 2 knowledge dates per quarter. it = enumerate( itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4) ) for sid, (q1e1, q1e2, q2e1, q2e2) in it: # We're assuming that estimates must come before the relevant # release. if ( q1e1 < q1e2 and q2e1 < q2e2 # All estimates are < Q2's event, so just constrain Q1 # estimates. and q1e1 < cls.q1_release_dates[0] and q1e2 < cls.q1_release_dates[0] ): sid_estimates.append( cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid) ) sid_releases.append(cls.create_releases_df(sid)) return pd.concat(sid_estimates + sid_releases).reset_index(drop=True) @classmethod def get_sids(cls): sids = cls.events[SID_FIELD_NAME].unique() # Tack on an extra sid to make sure that sids with no data are # included but have all-null columns. return list(sids) + [max(sids) + 1] @classmethod def create_releases_df(cls, sid): # Final release dates never change. The quarters have very tight date # ranges in order to reduce the number of dates we need to iterate # through when testing. return pd.DataFrame( { TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26"), ], "estimate": [0.5, 0.8], FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0], FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0], SID_FIELD_NAME: sid, } ) @classmethod def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid): return pd.DataFrame( { EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates, "estimate": [0.1, 0.2, 0.3, 0.4], FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0], FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0], TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2], SID_FIELD_NAME: sid, } ) def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date): return pd.DataFrame() def test_estimates(self): dataset = QuartersEstimates(1) engine = self.make_engine() results = engine.run_pipeline( Pipeline({c.name: c.latest for c in dataset.columns}), start_date=self.trading_days[1], end_date=self.trading_days[-2], ) for sid in self.ASSET_FINDER_EQUITY_SIDS: sid_estimates = results.xs(sid, level=1) # Separate assertion for all-null DataFrame to avoid setting # column dtypes on `all_expected`. 
if sid == max(self.ASSET_FINDER_EQUITY_SIDS): assert sid_estimates.isnull().all().all() else: ts_sorted_estimates = self.events[ self.events[SID_FIELD_NAME] == sid ].sort_values(TS_FIELD_NAME) q1_knowledge = ts_sorted_estimates[ ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1 ] q2_knowledge = ts_sorted_estimates[ ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2 ] all_expected = pd.concat( [ self.get_expected_estimate( q1_knowledge[ q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None) ], q2_knowledge[ q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None) ], date.tz_localize(None), ).set_index([[date]]) for date in sid_estimates.index ], axis=0, ) sid_estimates.index = all_expected.index.copy() assert_equal(all_expected[sid_estimates.columns], sid_estimates) class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date): # If our latest knowledge of q1 is that the release is # happening on this simulation date or later, then that's # the estimate we want to use. if ( not q1_knowledge.empty and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date ): return q1_knowledge.iloc[-1:] # If q1 has already happened or we don't know about it # yet and our latest knowledge indicates that q2 hasn't # happened yet, then that's the estimate we want to use. elif ( not q2_knowledge.empty and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date ): return q2_knowledge.iloc[-1:] return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date]) class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date): # The expected estimate will be for q2 if the last thing # we've seen is that the release date already happened. # Otherwise, it'll be for q1, as long as the release date # for q1 has already happened. if ( not q2_knowledge.empty and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date ): return q2_knowledge.iloc[-1:] elif ( not q1_knowledge.empty and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date ): return q1_knowledge.iloc[-1:] return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date]) class WithEstimateMultipleQuarters(WithEstimates): """ ZiplineTestCase mixin providing cls.events, cls.make_expected_out as class-level fixtures and self.test_multiple_qtrs_requested as a test. Attributes ---------- events : pd.DataFrame Simple DataFrame with estimates for 2 quarters for a single sid. Methods ------- make_expected_out() --> pd.DataFrame Returns the DataFrame that is expected as a result of running a Pipeline where estimates are requested for multiple quarters out. fill_expected_out(expected) Fills the expected DataFrame with data. Tests ------ test_multiple_qtrs_requested() Runs a Pipeline that calculate which estimates for multiple quarters out and checks that the returned columns contain data for the correct number of quarters out. 
""" @classmethod def make_events(cls): return pd.DataFrame( { SID_FIELD_NAME: [0] * 2, TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-10"), pd.Timestamp("2015-01-20"), ], "estimate": [1.0, 2.0], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: [2015, 2015], } ) @classmethod def init_class_fixtures(cls): super(WithEstimateMultipleQuarters, cls).init_class_fixtures() cls.expected_out = cls.make_expected_out() @classmethod def make_expected_out(cls): expected = pd.DataFrame( columns=[cls.columns[col] + "1" for col in cls.columns] + [cls.columns[col] + "2" for col in cls.columns], index=cls.trading_days, ) for (col, raw_name), suffix in itertools.product( cls.columns.items(), ("1", "2") ): expected_name = raw_name + suffix if col.dtype == datetime64ns_dtype: expected[expected_name] = pd.to_datetime(expected[expected_name]) else: expected[expected_name] = expected[expected_name].astype(col.dtype) cls.fill_expected_out(expected) return expected.reindex(cls.trading_days) def test_multiple_qtrs_requested(self): dataset1 = QuartersEstimates(1) dataset2 = QuartersEstimates(2) engine = self.make_engine() results = engine.run_pipeline( Pipeline( merge( [ {c.name + "1": c.latest for c in dataset1.columns}, {c.name + "2": c.latest for c in dataset2.columns}, ] ) ), start_date=self.trading_days[0], end_date=self.trading_days[-1], ) q1_columns = [col.name + "1" for col in self.columns] q2_columns = [col.name + "2" for col in self.columns] # We now expect a column for 1 quarter out and a column for 2 # quarters out for each of the dataset columns. assert_equal( sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values) ) assert_equal( self.expected_out.sort_index(axis=1), results.xs(0, level=1).sort_index(axis=1), ) class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @classmethod def fill_expected_out(cls, expected): # Fill columns for 1 Q out for raw_name in cls.columns.values(): expected.loc[ pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp( "2015-01-11", tz="UTC" ), raw_name + "1", ] = cls.events[raw_name].iloc[0] expected.loc[ pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp( "2015-01-20", tz="UTC" ), raw_name + "1", ] = cls.events[raw_name].iloc[1] # Fill columns for 2 Q out # We only have an estimate and event date for 2 quarters out before # Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs # out. 
for col_name in ["estimate", "event_date"]: expected.loc[ pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp( "2015-01-10", tz="UTC" ), col_name + "2", ] = cls.events[col_name].iloc[1] # But we know what FQ and FY we'd need in both Q1 and Q2 # because we know which FQ is next and can calculate from there expected.loc[ pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"), FISCAL_QUARTER_FIELD_NAME + "2", ] = 2 expected.loc[ pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"), FISCAL_QUARTER_FIELD_NAME + "2", ] = 3 expected.loc[ pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"), FISCAL_YEAR_FIELD_NAME + "2", ] = 2015 return expected class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @classmethod def fill_expected_out(cls, expected): # Fill columns for 1 Q out for raw_name in cls.columns.values(): expected[raw_name + "1"].loc[ pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp( "2015-01-19", tz="UTC" ) ] = cls.events[raw_name].iloc[0] expected[raw_name + "1"].loc[ pd.Timestamp("2015-01-20", tz="UTC") : ] = cls.events[raw_name].iloc[1] # Fill columns for 2 Q out for col_name in ["estimate", "event_date"]: expected[col_name + "2"].loc[ pd.Timestamp("2015-01-20", tz="UTC") : ] = cls.events[col_name].iloc[0] expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[ pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC") ] = 4 expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[ pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC") ] = 2014 expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[ pd.Timestamp("2015-01-20", tz="UTC") : ] = 1 expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[ pd.Timestamp("2015-01-20", tz="UTC") : ] = 2015 return expected class WithVaryingNumEstimates(WithEstimates): """ ZiplineTestCase mixin providing fixtures and a test to ensure that we have the correct overwrites when the event date changes. We want to make sure that if we have a quarter with an event date that gets pushed back, we don't start overwriting for the next quarter early. Likewise, if we have a quarter with an event date that gets pushed forward, we want to make sure that we start applying adjustments at the appropriate, earlier date, rather than the later date. Methods ------- assert_compute() Defines how to determine that results computed for the `SomeFactor` factor are correct. Tests ----- test_windows_with_varying_num_estimates() Tests that we create the correct overwrites from 2015-01-13 to 2015-01-14 regardless of how event dates were updated for each quarter for each sid. 
""" @classmethod def make_events(cls): return pd.DataFrame( { SID_FIELD_NAME: [0] * 3 + [1] * 3, TS_FIELD_NAME: [ pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12"), pd.Timestamp("2015-01-13"), ] * 2, EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-12"), pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-12"), pd.Timestamp("2015-01-20"), ], "estimate": [11.0, 12.0, 21.0] * 2, FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2, FISCAL_YEAR_FIELD_NAME: [2015] * 6, } ) @classmethod def assert_compute(cls, estimate, today): raise NotImplementedError("assert_compute") def test_windows_with_varying_num_estimates(self): dataset = QuartersEstimates(1) assert_compute = self.assert_compute class SomeFactor(CustomFactor): inputs = [dataset.estimate] window_length = 3 def compute(self, today, assets, out, estimate): assert_compute(estimate, today) engine = self.make_engine() engine.run_pipeline( Pipeline({"est": SomeFactor()}), start_date=pd.Timestamp("2015-01-13", tz="utc"), # last event date we have end_date=pd.Timestamp("2015-01-14", tz="utc"), ) class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase): def assert_compute(self, estimate, today): if today == pd.Timestamp("2015-01-13", tz="utc"): assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12])) assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12])) else: assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12])) assert_array_equal(estimate[:, 1], np.array([12, 12, 12])) @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase): def assert_compute(self, estimate, today): if today == pd.Timestamp("2015-01-13", tz="utc"): assert_array_equal(estimate[:, 0], np.array([11, 12, 12])) assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21])) else: assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21])) assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21])) @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) class WithEstimateWindows(WithEstimates): """ ZiplineTestCase mixin providing fixures and a test to test running a Pipeline with an estimates loader over differently-sized windows. Attributes ---------- events : pd.DataFrame DataFrame with estimates for 2 quarters for 2 sids. window_test_start_date : pd.Timestamp The date from which the window should start. timelines : dict[int -> pd.DataFrame] A dictionary mapping to the number of quarters out to snapshots of how the data should look on each date in the date range. Methods ------- make_expected_timelines() -> dict[int -> pd.DataFrame] Creates a dictionary of expected data. See `timelines`, above. Tests ----- test_estimate_windows_at_quarter_boundaries() Tests that we overwrite values with the correct quarter's estimate at the correct dates when we have a factor that asks for a window of data. """ END_DATE = pd.Timestamp("2015-02-10", tz="utc") window_test_start_date = pd.Timestamp("2015-01-05") critical_dates = [ pd.Timestamp("2015-01-09", tz="utc"), pd.Timestamp("2015-01-15", tz="utc"), pd.Timestamp("2015-01-20", tz="utc"), pd.Timestamp("2015-01-26", tz="utc"), pd.Timestamp("2015-02-05", tz="utc"), pd.Timestamp("2015-02-10", tz="utc"), ] # Starting date, number of announcements out. 
window_test_cases = list(itertools.product(critical_dates, (1, 2))) @classmethod def make_events(cls): # Typical case: 2 consecutive quarters. sid_0_timeline = pd.DataFrame( { TS_FIELD_NAME: [ cls.window_test_start_date, pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-12"), pd.Timestamp("2015-02-10"), # We want a case where we get info for a later # quarter before the current quarter is over but # after the split_asof_date to make sure that # we choose the correct date to overwrite until. pd.Timestamp("2015-01-18"), ], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-20"), pd.Timestamp("2015-02-10"), pd.Timestamp("2015-02-10"), pd.Timestamp("2015-04-01"), ], "estimate": [100.0, 101.0] + [200.0, 201.0] + [400], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 0, } ) # We want a case where we skip a quarter. We never find out about Q2. sid_10_timeline = pd.DataFrame( { TS_FIELD_NAME: [ pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12"), pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15"), ], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-22"), pd.Timestamp("2015-01-22"), pd.Timestamp("2015-02-05"), pd.Timestamp("2015-02-05"), ], "estimate": [110.0, 111.0] + [310.0, 311.0], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2, FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 10, } ) # We want to make sure we have correct overwrites when sid quarter # boundaries collide. This sid's quarter boundaries collide with sid 0. sid_20_timeline = pd.DataFrame( { TS_FIELD_NAME: [ cls.window_test_start_date, pd.Timestamp("2015-01-07"), cls.window_test_start_date, pd.Timestamp("2015-01-17"), ], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-20"), pd.Timestamp("2015-02-10"), pd.Timestamp("2015-02-10"), ], "estimate": [120.0, 121.0] + [220.0, 221.0], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2, FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 20, } ) concatted = pd.concat( [sid_0_timeline, sid_10_timeline, sid_20_timeline] ).reset_index() np.random.seed(0) return concatted.reindex(np.random.permutation(concatted.index)) @classmethod def get_sids(cls): sids = sorted(cls.events[SID_FIELD_NAME].unique()) # Add extra sids between sids in our data. We want to test that we # apply adjustments to the correct sids. return [ sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1]) ] + [sids[-1]] @classmethod def make_expected_timelines(cls): return {} @classmethod def init_class_fixtures(cls): super(WithEstimateWindows, cls).init_class_fixtures() cls.create_expected_df_for_factor_compute = partial( create_expected_df_for_factor_compute, cls.window_test_start_date, cls.get_sids(), ) cls.timelines = cls.make_expected_timelines() @parameterized.expand(window_test_cases) def test_estimate_windows_at_quarter_boundaries( self, start_date, num_announcements_out ): dataset = QuartersEstimates(num_announcements_out) trading_days = self.trading_days timelines = self.timelines # The window length should be from the starting index back to the first # date on which we got data. The goal is to ensure that as we # progress through the timeline, all data we got, starting from that # first date, is correctly overwritten. 
window_len = ( self.trading_days.get_loc(start_date) - self.trading_days.get_loc(self.window_test_start_date) + 1 ) class SomeFactor(CustomFactor): inputs = [dataset.estimate] window_length = window_len def compute(self, today, assets, out, estimate): today_idx = trading_days.get_loc(today) today_timeline = ( timelines[num_announcements_out] .loc[today] .reindex(trading_days[: today_idx + 1]) .values ) timeline_start_idx = len(today_timeline) - window_len assert_almost_equal(estimate, today_timeline[timeline_start_idx:]) engine = self.make_engine() engine.run_pipeline( Pipeline({"est": SomeFactor()}), start_date=start_date, # last event date we have end_date=pd.Timestamp("2015-02-10", tz="utc"), ) class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @classmethod def make_expected_timelines(cls): oneq_previous = pd.concat( [ pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), ], end_date, ) for end_date in pd.date_range("2015-01-09", "2015-01-19") ] ), cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), (10, np.NaN, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-20")), ], pd.Timestamp("2015-01-20"), ), cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), (10, np.NaN, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-20")), ], pd.Timestamp("2015-01-21"), ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), (10, 111, pd.Timestamp("2015-01-22")), (20, 121, pd.Timestamp("2015-01-20")), ], end_date, ) for end_date in pd.date_range("2015-01-22", "2015-02-04") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), (10, 311, pd.Timestamp("2015-02-05")), (20, 121, pd.Timestamp("2015-01-20")), ], end_date, ) for end_date in pd.date_range("2015-02-05", "2015-02-09") ] ), cls.create_expected_df_for_factor_compute( [ (0, 201, pd.Timestamp("2015-02-10")), (10, 311, pd.Timestamp("2015-02-05")), (20, 221, pd.Timestamp("2015-02-10")), ], pd.Timestamp("2015-02-10"), ), ] ) twoq_previous = pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), ], end_date, ) for end_date in pd.date_range("2015-01-09", "2015-02-09") ] # We never get estimates for S1 for 2Q ago because once Q3 # becomes our previous quarter, 2Q ago would be Q2, and we have # no data on it. 
+ [ cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-02-10")), (10, np.NaN, pd.Timestamp("2015-02-05")), (20, 121, pd.Timestamp("2015-02-10")), ], pd.Timestamp("2015-02-10"), ) ] ) return {1: oneq_previous, 2: twoq_previous} class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @classmethod def make_expected_timelines(cls): oneq_next = pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), ], pd.Timestamp("2015-01-09"), ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), ], end_date, ) for end_date in pd.date_range("2015-01-12", "2015-01-19") ] ), cls.create_expected_df_for_factor_compute( [ (0, 100, cls.window_test_start_date), (0, 101, pd.Timestamp("2015-01-20")), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), ], pd.Timestamp("2015-01-20"), ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], end_date, ) for end_date in pd.date_range("2015-01-21", "2015-01-22") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), (10, 310, pd.Timestamp("2015-01-09")), (10, 311, pd.Timestamp("2015-01-15")), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], end_date, ) for end_date in pd.date_range("2015-01-23", "2015-02-05") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], end_date, ) for end_date in pd.date_range("2015-02-06", "2015-02-09") ] ), cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), (0, 201, pd.Timestamp("2015-02-10")), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], pd.Timestamp("2015-02-10"), ), ] ) twoq_next = pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), ], end_date, ) for end_date in pd.date_range("2015-01-09", "2015-01-11") ] + [ cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), ], end_date, ) for end_date in pd.date_range("2015-01-12", "2015-01-16") ] + [ cls.create_expected_df_for_factor_compute( [ (0, 200, pd.Timestamp("2015-01-12")), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp("2015-01-17")), ], pd.Timestamp("2015-01-20"), ) ] + [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), ], end_date, ) for end_date in 
pd.date_range("2015-01-21", "2015-02-10") ] ) return {1: oneq_next, 2: twoq_next} class WithSplitAdjustedWindows(WithEstimateWindows): """ ZiplineTestCase mixin providing fixures and a test to test running a Pipeline with an estimates loader over differently-sized windows and with split adjustments. """ split_adjusted_asof_date = pd.Timestamp("2015-01-14") @classmethod def make_events(cls): # Add an extra sid that has a release before the split-asof-date in # order to test that we're reversing splits correctly in the previous # case (without an overwrite) and in the next case (with an overwrite). sid_30 = pd.DataFrame( { TS_FIELD_NAME: [ cls.window_test_start_date, pd.Timestamp("2015-01-09"), # For Q2, we want it to start early enough # that we can have several adjustments before # the end of the first quarter so that we # can test un-adjusting & readjusting with an # overwrite. cls.window_test_start_date, # We want the Q2 event date to be enough past # the split-asof-date that we can have # several splits and can make sure that they # are applied correctly. pd.Timestamp("2015-01-20"), ], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-20"), ], "estimate": [130.0, 131.0, 230.0, 231.0], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2, FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 30, } ) # An extra sid to test no splits before the split-adjusted-asof-date. # We want an event before and after the split-adjusted-asof-date & # timestamps for data points also before and after # split-adjsuted-asof-date (but also before the split dates, so that # we can test that splits actually get applied at the correct times). sid_40 = pd.DataFrame( { TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-09"), pd.Timestamp("2015-02-10"), ], "estimate": [140.0, 240.0], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 40, } ) # An extra sid to test all splits before the # split-adjusted-asof-date. All timestamps should be before that date # so that we have cases where we un-apply and re-apply splits. sid_50 = pd.DataFrame( { TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")], EVENT_DATE_FIELD_NAME: [ pd.Timestamp("2015-01-09"), pd.Timestamp("2015-02-10"), ], "estimate": [150.0, 250.0], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 50, } ) return pd.concat( [ # Slightly hacky, but want to make sure we're using the same # events as WithEstimateWindows. cls.__base__.make_events(), sid_30, sid_40, sid_50, ] ) @classmethod def make_splits_data(cls): # For sid 0, we want to apply a series of splits before and after the # split-adjusted-asof-date we well as between quarters (for the # previous case, where we won't see any values until after the event # happens). 
sid_0_splits = pd.DataFrame( { SID_FIELD_NAME: 0, "ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100), "effective_date": ( pd.Timestamp("2014-01-01"), # Filter out # Split before Q1 event & after first estimate pd.Timestamp("2015-01-07"), # Split before Q1 event pd.Timestamp("2015-01-09"), # Split before Q1 event pd.Timestamp("2015-01-13"), # Split before Q1 event pd.Timestamp("2015-01-15"), # Split before Q1 event pd.Timestamp("2015-01-18"), # Split after Q1 event and before Q2 event pd.Timestamp("2015-01-30"), # Filter out - this is after our date index pd.Timestamp("2016-01-01"), ), } ) sid_10_splits = pd.DataFrame( { SID_FIELD_NAME: 10, "ratio": (0.2, 0.3), "effective_date": ( # We want a split before the first estimate and before the # split-adjusted-asof-date but within our calendar index so # that we can test that the split is NEVER applied. pd.Timestamp("2015-01-07"), # Apply a single split before Q1 event. pd.Timestamp("2015-01-20"), ), } ) # We want a sid with split dates that collide with another sid (0) to # make sure splits are correctly applied for both sids. sid_20_splits = pd.DataFrame( { SID_FIELD_NAME: 20, "ratio": ( 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, ), "effective_date": ( pd.Timestamp("2015-01-07"), pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-15"), pd.Timestamp("2015-01-18"), pd.Timestamp("2015-01-30"), ), } ) # This sid has event dates that are shifted back so that we can test # cases where an event occurs before the split-asof-date. sid_30_splits = pd.DataFrame( { SID_FIELD_NAME: 30, "ratio": (8, 9, 10, 11, 12), "effective_date": ( # Split before the event and before the # split-asof-date. pd.Timestamp("2015-01-07"), # Split on date of event but before the # split-asof-date. pd.Timestamp("2015-01-09"), # Split after the event, but before the # split-asof-date. pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-15"), pd.Timestamp("2015-01-18"), ), } ) # No splits for a sid before the split-adjusted-asof-date. sid_40_splits = pd.DataFrame( { SID_FIELD_NAME: 40, "ratio": (13, 14), "effective_date": ( pd.Timestamp("2015-01-20"), pd.Timestamp("2015-01-22"), ), } ) # No splits for a sid after the split-adjusted-asof-date. sid_50_splits = pd.DataFrame( { SID_FIELD_NAME: 50, "ratio": (15, 16), "effective_date": ( pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-14"), ), } ) return pd.concat( [ sid_0_splits, sid_10_splits, sid_20_splits, sid_30_splits, sid_40_splits, sid_50_splits, ] ) class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return PreviousSplitAdjustedEarningsEstimatesLoader( events, columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=["estimate"], split_adjusted_asof=cls.split_adjusted_asof_date, ) @classmethod def make_expected_timelines(cls): oneq_previous = pd.concat( [ pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), # Undo all adjustments that haven't happened yet. 
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")), ], end_date, ) for end_date in pd.date_range("2015-01-09", "2015-01-12") ] ), cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")), ], pd.Timestamp("2015-01-13"), ), cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), ], pd.Timestamp("2015-01-14"), ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131 * 11, pd.Timestamp("2015-01-09")), (40, 140.0, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), ], end_date, ) for end_date in pd.date_range("2015-01-15", "2015-01-16") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), (10, np.NaN, cls.window_test_start_date), (20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")), (30, 231, pd.Timestamp("2015-01-20")), (40, 140.0 * 13, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), ], end_date, ) for end_date in pd.date_range("2015-01-20", "2015-01-21") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 101, pd.Timestamp("2015-01-20")), (10, 111 * 0.3, pd.Timestamp("2015-01-22")), (20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")), (30, 231, pd.Timestamp("2015-01-20")), (40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), ], end_date, ) for end_date in pd.date_range("2015-01-22", "2015-01-29") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 101 * 7, pd.Timestamp("2015-01-20")), (10, 111 * 0.3, pd.Timestamp("2015-01-22")), (20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")), (30, 231, pd.Timestamp("2015-01-20")), (40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), ], end_date, ) for end_date in pd.date_range("2015-01-30", "2015-02-04") ] ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 101 * 7, pd.Timestamp("2015-01-20")), (10, 311 * 0.3, pd.Timestamp("2015-02-05")), (20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")), (30, 231, pd.Timestamp("2015-01-20")), (40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")), (50, 150.0, pd.Timestamp("2015-01-09")), ], end_date, ) for end_date in pd.date_range("2015-02-05", "2015-02-09") ] ), cls.create_expected_df_for_factor_compute( [ (0, 201, pd.Timestamp("2015-02-10")), (10, 311 * 0.3, pd.Timestamp("2015-02-05")), (20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")), (30, 231, pd.Timestamp("2015-01-20")), (40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")), (50, 250.0, pd.Timestamp("2015-02-10")), ], pd.Timestamp("2015-02-10"), ), ] ) twoq_previous = pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, np.NaN, cls.window_test_start_date), ], end_date, ) for end_date in pd.date_range("2015-01-09", "2015-01-19") ] + [ 
cls.create_expected_df_for_factor_compute( [ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131 * 11 * 12, pd.Timestamp("2015-01-20")), ], end_date, ) for end_date in pd.date_range("2015-01-20", "2015-02-09") ] # We never get estimates for S1 for 2Q ago because once Q3 # becomes our previous quarter, 2Q ago would be Q2, and we have # no data on it. + [ cls.create_expected_df_for_factor_compute( [ (0, 101 * 7, pd.Timestamp("2015-02-10")), (10, np.NaN, pd.Timestamp("2015-02-05")), (20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")), (30, 131 * 11 * 12, pd.Timestamp("2015-01-20")), (40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")), (50, 150.0, pd.Timestamp("2015-02-10")), ], pd.Timestamp("2015-02-10"), ) ] ) return {1: oneq_previous, 2: twoq_previous} class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return NextSplitAdjustedEarningsEstimatesLoader( events, columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=["estimate"], split_adjusted_asof=cls.split_adjusted_asof_date, ) @classmethod def make_expected_timelines(cls): oneq_next = pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 100 * 1 / 4, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (20, 120 * 5 / 3, cls.window_test_start_date), (20, 121 * 5 / 3, pd.Timestamp("2015-01-07")), (30, 130 * 1 / 10, cls.window_test_start_date), (30, 131 * 1 / 10, pd.Timestamp("2015-01-09")), (40, 140, pd.Timestamp("2015-01-09")), (50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")), ], pd.Timestamp("2015-01-09"), ), cls.create_expected_df_for_factor_compute( [ (0, 100 * 1 / 4, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 120 * 5 / 3, cls.window_test_start_date), (20, 121 * 5 / 3, pd.Timestamp("2015-01-07")), (30, 230 * 1 / 10, cls.window_test_start_date), (40, np.NaN, pd.Timestamp("2015-01-10")), (50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-12"), ), cls.create_expected_df_for_factor_compute( [ (0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), (30, 230, cls.window_test_start_date), (40, np.NaN, pd.Timestamp("2015-01-10")), (50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-13"), ), cls.create_expected_df_for_factor_compute( [ (0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp("2015-01-07")), (30, 230, cls.window_test_start_date), (40, np.NaN, pd.Timestamp("2015-01-10")), (50, 250.0, pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-14"), ), pd.concat( [ cls.create_expected_df_for_factor_compute( [ (0, 100 * 5, cls.window_test_start_date), (10, 110, pd.Timestamp("2015-01-09")), (10, 111, pd.Timestamp("2015-01-12")), (20, 120 * 0.7, cls.window_test_start_date), (20, 121 * 0.7, pd.Timestamp("2015-01-07")), (30, 230 * 11, cls.window_test_start_date), (40, 240, pd.Timestamp("2015-01-15")), (50, 250.0, pd.Timestamp("2015-01-12")), ], end_date, ) for end_date in pd.date_range("2015-01-15", "2015-01-16") ] ), cls.create_expected_df_for_factor_compute( [ (0, 100 * 5 * 6, cls.window_test_start_date), (0, 101, pd.Timestamp("2015-01-20")), 
(10, 110 * 0.3, pd.Timestamp("2015-01-09")), (10, 111 * 0.3, pd.Timestamp("2015-01-12")), (20, 120 * 0.7 * 0.8, cls.window_test_start_date), (20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")), (30, 230 * 11 * 12, cls.window_test_start_date), (30, 231, pd.Timestamp("2015-01-20")), (40, 240 * 13, pd.Timestamp("2015-01-15")), (50, 250.0, pd.Timestamp("2015-01-12")), ], pd.Timestamp("2015-01-20"), ), cls.create_expected_df_for_factor_compute( [ (0, 200 * 5 * 6, pd.Timestamp("2015-01-12")), (10, 110 * 0.3, pd.Timestamp("2015-01-09")), (10, 111 * 0.3, pd.Timestamp("2015-01-12")), (20, 220 * 0.7 * 0.8, cls.window_test_start_date), (20, 221 * 0.8, pd.Timestamp("2015-01-17")), (40, 240 * 13,
pd.Timestamp("2015-01-15")
pandas.Timestamp
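# The row above completes a pd.Timestamp(...) call; a minimal, self-contained sketch of
# the pandas.Timestamp / pd.date_range pattern it exercises (the dates are illustrative):
import pandas as pd

ts = pd.Timestamp("2015-01-15")                       # scalar timestamp
window = pd.date_range("2015-01-15", "2015-01-16")    # inclusive daily DatetimeIndex
assert ts == window[0]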
""" This script intends to use PySPaRTAN module to generate predicted matrices used in the paper. PySPaRTAN has 3 Hyperparameters:Spectrum P, lamda, and rsL2 We can run PySPaRTAN by specifying some values to those parameters or using default ones in the script. We can also use cross-validation to generate the optional values for those Hyperparameters at first, and then run PySPaRTAN to generate the projections. When running this script from command line, the following parameters can be added to the command: --input_dir : directory of input files, default="../data/inputs" --output_dir : directory of output files, default="../data/outputs" --dataset_D : the name of gene-TF matrix file. the file requires .csv format. only contains name here, do not include ".csv" extension --dataset_P : the name of protein matrix the same other requirements as --dataset_D --dataset_Y : the name of gene expression matrix the same other requirements as --dataset_D --spectrumP : Dimension reduction coefficient on protein space, default=0.7 --lamda : LASSO regression coefficient,default=0.001 --rsL2 : ridge regression coefficient, default=0.001 --normalization : type of normalizion performed on matrices, default is l2 normalization --fold : how many folds to be used when doing cross-validation. default=0, means using specified hyper-parameters, do not do cross-validation This script requires numpy, pandas, sklearn to be installed in the python running environment """ import argparse import os import numpy as np import pandas as pd from sklearn.model_selection import KFold from sklearn.preprocessing import normalize from PySPaRTAN import PySPaRTAN parser = argparse.ArgumentParser() parser.add_argument("--input_dir", help="directory of input files", type=str, default="../data/inputs") parser.add_argument("--output_dir", help="directory of output files", type=str, default="../data/outputs") parser.add_argument("--dataset_D", help="name of gene-TF matrix", type=str, default="Dpbmc") parser.add_argument("--dataset_P", help="name of the dataset P which will be passed in", type=str, default="Ppbmc5kn_CD16") parser.add_argument("--dataset_Y", help="name of the dataset Y which will be passed in", type=str, default="Ypbmc5kn_CD16") parser.add_argument("--spectrumP", help="Dimension reduction coefficient on protein space", type=float, default=0.7) parser.add_argument("--lamda", help="LASSO regression coefficient", type=float, default=0.001) parser.add_argument("--rsL2", help="ridge regression coefficient", type=float, default=0.001) parser.add_argument("--normalization", help="type of normalizion performed on matrices,\ no normalization if set to empty", type=str, default="l2") parser.add_argument('--fold', help="how many folds for the cross_validation.\ No cross_validation and using default/specified parameters if set to 0", type=int, default=0) args = parser.parse_args() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) print("Read in datasets D, P, and Y ...") D_ori = pd.read_csv(os.path.join(args.input_dir, args.dataset_D+'.csv'), index_col=0) P_ori = pd.read_csv(os.path.join(args.input_dir, args.dataset_P+'.csv'), index_col=0) Y_ori = pd.read_csv(os.path.join(args.input_dir, args.dataset_Y+'.csv'), index_col=0) TF_name = list(D_ori.columns) cell_name = list(Y_ori.columns) gene_name = list(Y_ori.index) protein_name = list(P_ori.columns) D_mat = D_ori.values P_mat = P_ori.values Y_mat = Y_ori.values # normalize the dataset if args.normalization != "": D = normalize(D_mat, norm=args.normalization, 
axis=0) Y = normalize(Y_mat, norm=args.normalization, axis=0) P = normalize(P_mat, norm=args.normalization, axis=1) # create the object of SPaRTAN reg = PySPaRTAN() # cross-validate to determine optimal parameters fold = args.fold if fold != 0: # using cross validation to determine the optimal parameters lamdas = [0.001, 0.01, 0.1, 0.2, 0.3] rsL2s = [0.001, 0.01, 0.1] spectrumAs = [1] spectrumBs = [0.5, 0.6, 0.7] lenlamdas = len(lamdas) lenrsL2s = len(rsL2s) lenspAs = len(spectrumAs) lenspBs = len(spectrumBs) corr_all_pearson = np.zeros((lenspAs, lenspBs, lenlamdas, lenrsL2s)) for a in range(0, lenspAs): for b in range(0, lenspBs): for l in range(0, lenlamdas): for r in range(0, lenrsL2s): print("cross validating spectrumP={}, lambda={}, rsL2={}" .format(spectrumBs[b], lamdas[l], rsL2s[r])) sum_corr_pearson = 0 kf = KFold(n_splits=fold) for train_index, test_index in kf.split(P_mat): # split the data into train and test set P_train, P_test = P_mat[train_index, :], P_mat[test_index, :] Y_train, Y_test = Y_mat[:, train_index], Y_mat[:, test_index] # normalize the train and test set Y_train = normalize(Y_train, axis=0) Y_test = normalize(Y_test, axis=0) P_train = normalize(P_train, axis=1) P_test = normalize(P_test, axis=1) # train the model reg.fit( D, P_train, Y_train, lamda=lamdas[l], rsL2=rsL2s[r], spectrumA=spectrumAs[a], spectrumB=spectrumBs[b], ) # get predicted value Y_pred on P_test Y_pred = reg.predict(P_test) # get the correlation bewteen Y_pred and Y_test corr_pearson = reg.get_corr(Y_pred, Y_test) sum_corr_pearson = sum_corr_pearson + corr_pearson corr_all_pearson[a, b, l, r] = sum_corr_pearson / fold # retrive the best parameters max_a, max_b, max_l, max_r = np.unravel_index( corr_all_pearson.argmax(), corr_all_pearson.shape ) lamda_best = lamdas[max_l] rsL2_best = rsL2s[max_r] spectrumA_best = spectrumAs[max_a] spectrumB_best = spectrumBs[max_b] print("lamda_best={}, rsL2_best={}, spectrumA_best={}, spectrumB_best={}" .format(lamda_best, rsL2_best, spectrumA_best, spectrumB_best)) else: # fold ==0: using default/specified paramters lamda_best = args.lamda rsL2_best = args.rsL2 spectrumA_best = 1 spectrumB_best = args.spectrumP print("Processing ...") # re-train the model reg.fit(D, P, Y, lamda_best, rsL2_best, spectrumA_best, spectrumB_best) # retrieve W, projD, projP W = reg.get_W() projD = reg.get_projD() projP = reg.get_projP() df_W =
pd.DataFrame(data=W, index=TF_name, columns=protein_name)
pandas.DataFrame
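# The completion above wraps the fitted W matrix in a labelled frame; a small sketch of
# that pandas.DataFrame(data=..., index=..., columns=...) pattern with invented names:
import numpy as np
import pandas as pd

W = np.arange(6).reshape(2, 3)
df_W = pd.DataFrame(data=W, index=["TF1", "TF2"], columns=["P1", "P2", "P3"])
print(df_W.loc["TF1", "P2"])  # label-based lookup -> 1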
# IMPORT OF THE CREATED OBJECTS #
from screenManager import Screen, Button
from inputBox import Inputbox
from simElements import Bolinha, Timer
import pygame as pg
import numpy as np
import pandas as pd
import operator

# GLOBAL VARIABLES #
# Here Pygame is initialised and all the variables used are defined, such as:
# window height and width, screens (parameter menu, simulation and results display),
# input boxes so the user can enter values, buttons, timer, etc.
pg.init()
pg.font.init()
w_w = 1024
w_h = 768
FONT = pg.font.Font(None, 32)
time = 10
done = False

colours = {"white": (255,255,255), "black": (0,0,0), "shadow": (90,90,90), "bg": (220,220,220), "red": (255,0,0), "green": (0,255,0), "blue": (0,0,255)}

values = {'FNN': 0.0, 'FRP': 0.0, 'SCD': 0.0, 'SCE': 0.0, 'SHI': 0.0, 'ERR': 0.0, 'ENR': 0.0, 'EPE': 0.0, 'PNS': 0.0, 'PRS': 0.0, 'PEX': 0.0, 'PRM': 0.0, 'PMN': 0.0, 'PLG': 0.0, 'SPP': 0.0, 'CSU': 0.0}

# Windows
paramScreen = Screen("Valores", w_w, w_h, fill=colours['bg'])
simScreen = Screen("Simulação", w_w, w_h, fill=colours['bg'])
resScreen = Screen("Resultados", w_w, w_h, fill=colours['bg'])

win = paramScreen.makeCurrent()  # Current screen
finalResults = []

# Buttons
bStart = Button(800,700,150,50,colours['blue'],colours['blue'],None,40,colours['black'],"Start")
bReturn = Button(50,700,150,50,colours['red'],colours['red'],None,40,colours['black'],"Return")
bExportResults = Button(775,700,200,50,colours['green'],colours['green'],None,40,colours['black'],"Export as .txt")

# REGARDING THE PARAMETER SCREEN #
# Parameters for the values screen
FNN = Inputbox("FNN",100,100,50,32)
FRP = Inputbox("FRP",100,150,50,32)
SCD = Inputbox("SCD",100,200,50,32)
SCE = Inputbox("SCE",100,250,50,32)
SHI = Inputbox("SHI",100,300,50,32)
ERR = Inputbox("ERR",100,350,50,32)
ENR = Inputbox("ENR",100,400,50,32)
EPE = Inputbox("EPE",100,450,50,32)
PNS = Inputbox("PNS",600,100,50,32)
PRS = Inputbox("PRS",600,150,50,32)
PEX = Inputbox("PEX",600,200,50,32)
PRM = Inputbox("PRM",600,250,50,32)
PMN = Inputbox("PMN",600,300,50,32)
PLG = Inputbox("PLG",600,350,50,32)
SPP = Inputbox("SPP",600,400,50,32)
CSU = Inputbox("CSU",600,450,50,32)

# Store the parameters in a list
input_boxes = [FNN, FRP, SCD, SCE, SHI, ERR, ENR, EPE, PNS, PRS, PEX, PRM, PMN, PLG, SPP, CSU]
exportValues = np.zeros(len(input_boxes))  # List with the value of each parameter to be exported

# REGARDING THE SIMULATION SCREEN #
# Timer
clock = pg.time.Clock()
#timer = Timer(1,850,50)
pg.time.set_timer(pg.USEREVENT+1,1000)
timer_event = pg.USEREVENT+1
minsize = 15
maxsize = 100

# Simulation calculations
op = {"+": operator.add, "-": operator.sub}  # Operators for the interaction types
tabela =
pd.read_csv("interactions.csv", index_col=0)
pandas.read_csv
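# The completion above loads an interaction table with its first column as the row index;
# a self-contained sketch of pd.read_csv(..., index_col=0) using an in-memory CSV stand-in:
import io
import pandas as pd

csv_text = "name,FNN,FRP\nFNN,+,-\nFRP,-,+\n"
tabela = pd.read_csv(io.StringIO(csv_text), index_col=0)  # first column becomes the index
print(tabela.loc["FNN", "FRP"])  # "-"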
import os from datetime import datetime import nose import pandas as pd from pandas import compat from pandas.util.testing import network, assert_frame_equal, with_connectivity_check from numpy.testing.decorators import slow import pandas.util.testing as tm if compat.PY3: raise nose.SkipTest("python-gflags does not support Python 3 yet") try: import httplib2 import pandas.io.ga as ga from pandas.io.ga import GAnalytics, read_ga from pandas.io.auth import AuthenticationConfigError, reset_default_token_store from pandas.io import auth except ImportError: raise nose.SkipTest("need httplib2 and auth libs") class TestGoogle(tm.TestCase): _multiprocess_can_split_ = True def test_remove_token_store(self): auth.DEFAULT_TOKEN_FILE = 'test.dat' with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh: fh.write('test') reset_default_token_store() self.assertFalse(os.path.exists(auth.DEFAULT_TOKEN_FILE)) @with_connectivity_check("http://www.google.com") def test_getdata(self): try: end_date = datetime.now() start_date = end_date - pd.offsets.Day() * 5 end_date = end_date.strftime('%Y-%m-%d') start_date = start_date.strftime('%Y-%m-%d') reader = GAnalytics() df = reader.get_data( metrics=['avgTimeOnSite', 'visitors', 'newVisits', 'pageviewsPerVisit'], start_date=start_date, end_date=end_date, dimensions=['date', 'hour'], parse_dates={'ts': ['date', 'hour']}, index_col=0) self.assertIsInstance(df, pd.DataFrame) self.assertIsInstance(df.index, pd.DatetimeIndex) self.assertGreater(len(df), 1) self.assertTrue('date' not in df) self.assertTrue('hour' not in df) self.assertEqual(df.index.name, 'ts') self.assertTrue('avgTimeOnSite' in df) self.assertTrue('visitors' in df) self.assertTrue('newVisits' in df) self.assertTrue('pageviewsPerVisit' in df) df2 = read_ga( metrics=['avgTimeOnSite', 'visitors', 'newVisits', 'pageviewsPerVisit'], start_date=start_date, end_date=end_date, dimensions=['date', 'hour'], parse_dates={'ts': ['date', 'hour']}, index_col=0) assert_frame_equal(df, df2) except AuthenticationConfigError: raise nose.SkipTest("authentication error") @with_connectivity_check("http://www.google.com") def test_iterator(self): try: reader = GAnalytics() it = reader.get_data( metrics='visitors', start_date='2005-1-1', dimensions='date', max_results=10, chunksize=5, index_col=0) df1 = next(it) df2 = next(it) for df in [df1, df2]: self.assertIsInstance(df, pd.DataFrame) self.assertIsInstance(df.index, pd.DatetimeIndex) self.assertEqual(len(df), 5) self.assertTrue('date' not in df) self.assertEqual(df.index.name, 'date') self.assertTrue('visitors' in df) self.assertTrue((df2.index > df1.index).all()) except AuthenticationConfigError: raise nose.SkipTest("authentication error") def test_v2_advanced_segment_format(self): advanced_segment_id = 1234567 query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id) self.assertEqual(query['segment'], 'gaid::' + str(advanced_segment_id), "An integer value should be formatted as an advanced segment.") def test_v2_dynamic_segment_format(self): dynamic_segment_id = 'medium==referral' query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=dynamic_segment_id) self.assertEqual(query['segment'], 'dynamic::ga:' + str(dynamic_segment_id), "A string value with more than just letters and numbers should be formatted as a dynamic segment.") def test_v3_advanced_segment_common_format(self): advanced_segment_id = 'aZwqR234' query =
ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id)
pandas.io.ga.format_query
# --- # jupyter: # jupytext: # formats: ipynb,py # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.6.0 # kernelspec: # display_name: deep_ml_curriculum # language: python # name: deep_ml_curriculum # --- # Note download data from https://drive.google.com/drive/folders/1EgDN57LDuvlZAwr5-eHWB5CTJ7K9HpDP # # Credit to this repo: https://github.com/LukasMosser/geolink_dataset # # ## Data Disclaimer # # All the data serving as an input to these notebooks was generously donated by GEOLINK # and is CC-by-SA 4.0 # # If you use their data please reference their dataset properly to give them credit for their contribution. # %reload_ext autoreload # %autoreload 2 import lasio import matplotlib.pyplot as plt # %matplotlib inline import os from tqdm.auto import tqdm import pandas as pd import geopandas as gpd import numpy as np from pathlib import Path from sklearn import preprocessing from operator import itemgetter # # in and our directories data_locations = Path( "../../data/raw/geolink_dataset/GEOLINK North sea wells with Lithology interpretation/GEOLINK_Lithology and wells NORTH SEA" ) data_locations_wellheads = Path("../../data/raw/geolink_dataset/norge_well_heads") interim_locations = Path("../../data/processed/geolink_norge_dataset/") interim_locations2 = Path("../../data/interim/geolink_norge_dataset/") # # load and save as parquet df_lithology = pd.read_excel(data_locations / "../Lithology code data.xlsx", header=1)[ :-1 ] df_lithology["Abbreviation"] = pd.to_numeric(df_lithology["Abbreviation"]) df_lithology.to_parquet( interim_locations / "geolink_norge_lithology.parquet", compression="gzip" ) df_lithology # + # TODO rename well heads df_well_tops = pd.concat( [ pd.read_csv(data_locations_wellheads / "wellbore_exploration_all.csv"), pd.read_csv(data_locations_wellheads / "wellbore_development_all.csv"),
pd.read_csv(data_locations_wellheads / "wellbore_other_all.csv")
pandas.read_csv
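# The notebook above stacks several well-head CSVs into one frame with pd.concat; a sketch
# of that pattern using in-memory strings instead of the real wellbore files:
import io
import pandas as pd

files = ["well,depth\nA,100\n", "well,depth\nB,250\n"]
df_well_tops = pd.concat(
    [pd.read_csv(io.StringIO(f)) for f in files],
    ignore_index=True,  # rebuild a clean 0..n-1 index across the pieces
)
print(len(df_well_tops))  # 2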
"""Provide DataFrameGroupBy and DataFrameRowwise""" from typing import Any, Callable, Mapping, Tuple, Union from abc import ABC, abstractmethod import numpy import pandas from pandas import DataFrame, Series, RangeIndex from pipda.function import Function, FastEvalFunction from pipda.symbolic import DirectRefItem, DirectRefAttr, ReferenceAttr from pipda.utils import CallingEnvs from .defaults import DEFAULT_COLUMN_PREFIX from .types import StringOrIter, is_scalar from .utils import apply_dtypes from ..base import setdiff class DataFrameGroupByABC(DataFrame, ABC): """Abstract class for DataFrameGroupBy""" def __init__( self, data: Any, _group_vars: StringOrIter = None, _group_drop: bool = None, # used for copy, etc so we don't need to recompute the group data _group_data: DataFrame = None, **kwargs: Any, ) -> None: if isinstance(data, DataFrame): kwargs["copy"] = True super().__init__(data, **kwargs) # drop index to align to tidyverse's APIs self.reset_index(drop=True, inplace=True) # rowwise if _group_vars is None: _group_vars = [] if is_scalar(_group_vars): _group_vars = [_group_vars] # In order to align to dplyr's API self.attrs["_group_drop"] = True if _group_drop is None else _group_drop self.attrs["_group_vars"] = _group_vars self.attrs["_group_data"] = _group_data @abstractmethod def _datar_apply( self, _func: Callable, *args: Any, _mappings: Mapping[str, Any] = None, _method: str = "apply", _groupdata: bool = True, _drop_index: bool = True, **kwargs: Any, ) -> DataFrame: pass def copy( self, deep: bool = True, copy_grouped: bool = False, ) -> "DataFrameGroupByABC": """Copy the dataframe and keep the class""" if not copy_grouped: return super().copy(deep) if deep: return self.__class__( super().copy(), _group_vars=self.attrs["_group_vars"][:], _group_drop=self.attrs["_group_drop"], _group_data=self._group_data.copy(), ) # we still need to calculate _grouped_df return self.__class__( self, _group_vars=self.attrs["_group_vars"], _group_drop=self.attrs["_group_drop"], _group_data=self._group_data, ) class DataFrameGroupBy(DataFrameGroupByABC): """A customized DataFrameGroupBy class, other than pandas' DataFrameGroupBy Pandas' DataFrameGroupBy has obj refer to the original data frame. We do it the reverse way by attaching the groupby object to the frame. So that it is: 1. easier to write single dispatch functions, as DataFrameGroupBy is now a subclass of pandas' DataFrame 2. easier to display the frame. We can use all utilities for frame to display. By `core._frame_format_patch.py`, we are also able to show the grouping information 3. possible for future optimizations Known Issues: - Due to https://github.com/pandas-dev/pandas/issues/35202 Currently `dropna` is fixed to True of `df.groupby(...)` So no NAs will be kept in group vars - `_drop = FALSE` does not work when there are multiple group vars - Since group vars are required in `DataFrame.groupby()`, so virtual groupings are not supported. - Groupby on a column with tuples creates a multiindex https://github.com/pandas-dev/pandas/issues/21340 - Order of group data/groups does not follow the categories/levels of a category group variable. Args: data: Data that used to construct the frame. **kwargs: Additional keyword arguments passed to DataFrame constructor. _group_vars: The grouping variables _group_drop: Whether to drop non-observable rows _group_data: In most cases, used for copy. Use this groupdata if provided to avoid recalculate. 
Attributes: _grouped_df: The grouped data frame (pandas' DataFrameGroupBy object) """ def __init__( self, data: Any, _group_vars: StringOrIter = None, _group_drop: bool = None, _group_data: DataFrame = None, **kwargs: Any, ) -> None: super().__init__(data, _group_vars, _group_drop, _group_data, **kwargs) self.__dict__["_grouped_df"] = self.groupby( _group_vars, dropna=True, sort=False, observed=self.attrs["_group_drop"], ) # @property # def _constructor(self): # return DataFrameGroupBy @property def _group_data(self): """The group data""" # compose group data using self._grouped_df.grouper if self.attrs["_group_data"] is None: self.attrs["_group_data"] = DataFrame( ( [key] + [list(val)] if len(self.attrs["_group_vars"]) == 1 else list(key) + [list(val)] for key, val in sorted( self._grouped_df.grouper.groups.items(), key=lambda item: item[1][0] if len(item[1]) > 0 else self.shape[0], ) ), columns=self.attrs["_group_vars"] + ["_rows"], ) apply_dtypes( self.attrs["_group_data"], self.dtypes[self.attrs["_group_vars"]].to_dict(), ) return self.attrs["_group_data"] def _datar_apply( self, _func: Callable, *args: Any, _mappings: Mapping[str, Any] = None, _method: str = "apply", _groupdata: bool = True, _drop_index: bool = True, **kwargs: Any, ) -> DataFrame: """Customized apply. Aggregation on single columns will be tried to optimized. Args: _func: The function to be applied. *args: The non-keyword arguments for the function _mappings: The mapping (new name => transformation/aggregation) This will be used to check if we can turn the apply into agg. transformations like `f.x.mean()` or `mean(f.x)` will use agg to do the aggregation. `_func` will be used to apply if any item fails to be optimized. _method: Only optimize single column transformations when `agg` is provided _groupdata: Whether attach the group data to the result data frame anyway. Pandas will lose them if is a transform (num. of rows of result data frame is equal to the source data frame) instead of an aggregation _drop_index: Whether we should drop the index when the sub data frame goes into the apply function (`_func`). Returns: The transformed/aggregated data frame """ optms = _optimizable(_mappings) if _method == "agg" and optms: out = self._datar_agg(optms) else: group_index = -1 def _applied(subdf): nonlocal group_index group_index += 1 if _drop_index: subdf = subdf.reset_index(drop=True) subdf.attrs["_group_index"] = group_index subdf.attrs["_group_data"] = self._group_data ret = _func(subdf, *args, **kwargs) return None if ret is None else ret # keep the order out = self._grouped_df.apply(_applied).sort_index(level=-1) if not _groupdata: return out.reset_index(drop=True) if ( self.shape[0] > 0 and self.shape[0] == out.shape[0] and out.index.names == [None] ): gkeys = self._group_data[ self._group_data.columns[:-1].difference(out.columns) ] return
pandas.concat([gkeys, out], axis=1)
pandas.concat
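# The completion above glues the group-key columns back onto the transformed frame with a
# column-wise concat; a minimal sketch of pd.concat([...], axis=1) on invented data:
import pandas as pd

gkeys = pd.DataFrame({"g": ["a", "a", "b"]})
out = pd.DataFrame({"x": [1, 2, 3]})
combined = pd.concat([gkeys, out], axis=1)  # aligns on the shared RangeIndex
print(list(combined.columns))  # ['g', 'x']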
""" The goal of this script is to use python datetimes to quickly choose whether to download RAP or RUC data """ import os import glob import numpy as np import pandas as pd import sys #define the subprocess to run def run_process_rap(spc_date_string): os.system('python download_rap_analyses.py --first_init_time_string={} --last_init_time_string={} --top_local_directory_name="/ourdisk/hpc/ai2es/tornado/rap_data/"'.format(spc_date_string,spc_date_string)) def run_process_ruc(spc_date_string): os.system('python download_ruc_analyses.py --first_init_time_string={} --last_init_time_string={} --top_local_directory_name="/ourdisk/hpc/ai2es/tornado/ruc_data/"'.format(spc_date_string,spc_date_string)) #load the spc date string from the input line spc_date_string = sys.argv[1] #convert to dtime spc_datetime =
pd.to_datetime(spc_date_string)
pandas.to_datetime
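# The completion above parses the SPC date string with pd.to_datetime; a tiny sketch with
# an illustrative date (the real format of spc_date_string is not shown in the row):
import pandas as pd

spc_datetime = pd.to_datetime("2019-05-20")
print(spc_datetime.year, spc_datetime.strftime("%Y-%m-%d"))  # 2019 2019-05-20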
#%% from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler import numpy as np import pandas as pd import matplotlib.pyplot as plt from textblob import TextBlob import twitterscraper as ts import os import re import json import datetime as dt import yfinance as yf import plotly import plotly.express as px import plotly.graph_objs as go #%% # ------------------ # Got this method of pulling tweets form here: # ------------------ # https: // medium.com/@kevin.a.crystal/scraping-twitter-with-tweetscraper-and-python-ea783b40443b # https: // github.com/jenrhill/Power_Outage_Identification/blob/master/code/1_Data_Collection_and_EDA.ipynb # https: // www.youtube.com/watch?v = zF_Q2v_9zKY user = 'elonmusk' limit = 10000 tweets = ts.query_tweets_from_user(user=user, limit=limit) #%% class TweetAnalyzer(): """ Functionality for analyzing and categorizing content from tweets. """ #clean tweets def clean_tweet(self, tweet): return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split()) #creating sentimental score using TextBlob def analyze_sentiment_score(self, tweet): analysis_score = TextBlob(self.clean_tweet(tweet)) analysis_score = analysis_score.sentiment.polarity return analysis_score #Determining positive vs negative tweets def analyze_sentiment_result(self, tweet): analysis_result = TextBlob(self.clean_tweet(tweet)) if analysis_result.sentiment.polarity >= 0.3: return 'Positive' elif analysis_result.sentiment.polarity <= -0.3: return 'Negative' else: return '0' def tweets_to_data_frame(self, tweets): df2 = pd.DataFrame( data=[tweet.timestamp for tweet in tweets], columns=['Date']) df2['Tweet'] = np.array([tweet.text for tweet in tweets]) df2['Replied_Tweet'] = np.array([tweet.is_replied for tweet in tweets]) df2['Likes'] = np.array([tweet.likes for tweet in tweets]) df2['Reply_Count'] = np.array([tweet.replies for tweet in tweets]) df2['Retweets'] = np.array([tweet.retweets for tweet in tweets]) return df2 #%% if __name__ == '__main__': tweet_analyzer = TweetAnalyzer() df2 = tweet_analyzer.tweets_to_data_frame(tweets) df2['Sentiment_Score'] = np.array( [tweet_analyzer.analyze_sentiment_score(tweet) for tweet in df2['Tweet']]) df2['Sentiment_Result'] = np.array( [tweet_analyzer.analyze_sentiment_result(tweet) for tweet in df2['Tweet']]) mainData = df2.copy() mainData.head() #%% neg = mainData[mainData['Sentiment_Score'] == "Negative"] # .where('Sentiment Result'=='Positive') neg = neg.drop(columns = ['Replied_Tweet','Likes','Reply_Count','Retweets']) neg.sort_values('Sentiment_Score').to_csv('neg.csv') # %% # Truly determining what day the tweet will affect # Later than 4pm est then the tweet will affect the next day # Tweets during the day will affect the current day def checkDates(d): if d.date().weekday() == 4 and d.time().hour >= 16: return d + pd.Timedelta(days=3) elif d.date().weekday() == 5: return d + pd.Timedelta(days=2) elif d.date().weekday() == 6: return d + pd.Timedelta(days=1) else: return d mainData['Tweet_Date'] = mainData['Date'].apply( lambda d: checkDates(pd.to_datetime(d))).dt.date mainData.sort_values(by="Date") # %% mainData = mainData.groupby('Tweet_Date').agg( {'Likes': 'sum', 'Retweets': 'sum', 'Reply_Count': 'sum', 'Sentiment_Score': 'mean'}) # mainData.reset_index().head() # %% # Get the data of the stock Tesla Stock (TSLA) stockData = yf.download("TSLA", start="2019-4-1", end="2020-02-26") stockData.reset_index().head() stockData = stockData.drop(columns = ['Open','High','Low','Adj Close', 'Volume']) stockData = 
stockData.rename(columns={"Close": "Stock_Price"}) stockData.info() #%% #Joining Dataframes AllData = mainData.join(stockData, lsuffix='Tweet_Date', rsuffix='Date') AllData.head(17) # %% #scaling data # Get column names first names = AllData.columns index = AllData.index # Create the Scaler object scaler = preprocessing.StandardScaler() # Fit your data on the scaler object scaled_df = scaler.fit_transform(AllData) scaled_df =
pd.DataFrame(scaled_df, columns=names, index=index)
pandas.DataFrame
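# The completion above re-attaches column names and index after StandardScaler returns a
# bare ndarray; a compact sketch of that round trip on made-up tweet metrics:
import pandas as pd
from sklearn.preprocessing import StandardScaler

raw = pd.DataFrame({"Likes": [10.0, 20.0, 30.0], "Retweets": [1.0, 2.0, 3.0]})
scaled = StandardScaler().fit_transform(raw)  # numpy array, labels are lost here
scaled_df = pd.DataFrame(scaled, columns=raw.columns, index=raw.index)
print(scaled_df.mean())  # both columns now centred close to 0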
# Licensed to Modin Development Team under one or more contributor license agreements. # See the NOTICE file distributed with this work for additional information regarding # copyright ownership. The Modin Development Team licenses this file to you under the # Apache License, Version 2.0 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. """ Implement DataFrame public API as Pandas does. Almost all docstrings for public and magic methods should be inherited from Pandas for better maintability. So some codes are ignored in pydocstyle check: - D101: missing docstring in class - D102: missing docstring in public method - D105: missing docstring in magic method Manually add documentation for methods which are not presented in pandas. """ import pandas from pandas.core.common import apply_if_callable from pandas.core.dtypes.common import ( infer_dtype_from_object, is_dict_like, is_list_like, is_numeric_dtype, ) from pandas.core.indexes.api import ensure_index_from_sequences from pandas.util._validators import validate_bool_kwarg from pandas.io.formats.printing import pprint_thing from pandas._libs.lib import no_default from pandas._typing import Label import itertools import functools import numpy as np import sys from typing import Optional, Sequence, Tuple, Union, Mapping import warnings from modin.error_message import ErrorMessage from modin.utils import _inherit_docstrings, to_pandas, hashable from modin.config import IsExperimental from .utils import ( from_pandas, from_non_pandas, ) from .iterator import PartitionIterator from .series import Series from .base import BasePandasDataset, _ATTRS_NO_LOOKUP from .groupby import DataFrameGroupBy from .accessor import CachedAccessor, SparseFrameAccessor @_inherit_docstrings(pandas.DataFrame, excluded=[pandas.DataFrame.__init__]) class DataFrame(BasePandasDataset): def __init__( self, data=None, index=None, columns=None, dtype=None, copy=False, query_compiler=None, ): """ Distributed DataFrame object backed by Pandas dataframes. Parameters ---------- data: NumPy ndarray (structured or homogeneous) or dict: Dict can contain Series, arrays, constants, or list-like objects. index: pandas.Index, list, ObjectID The row index for this DataFrame. columns: pandas.Index The column names for this DataFrame, in pandas Index object. dtype: Data type to force. Only a single dtype is allowed. If None, infer copy: bool Copy data from inputs. Only affects DataFrame / 2d ndarray input. query_compiler: query_compiler A query compiler object to manage distributed computation. """ if isinstance(data, (DataFrame, Series)): self._query_compiler = data._query_compiler.copy() if index is not None and any(i not in data.index for i in index): raise NotImplementedError( "Passing non-existant columns or index values to constructor not" " yet implemented." ) if isinstance(data, Series): # We set the column name if it is not in the provided Series if data.name is None: self.columns = [0] if columns is None else columns # If the columns provided are not in the named Series, pandas clears # the DataFrame and sets columns to the columns provided. 
elif columns is not None and data.name not in columns: self._query_compiler = from_pandas( DataFrame(columns=columns) )._query_compiler if index is not None: self._query_compiler = data.loc[index]._query_compiler elif columns is None and index is None: data._add_sibling(self) else: if columns is not None and any(i not in data.columns for i in columns): raise NotImplementedError( "Passing non-existant columns or index values to constructor not" " yet implemented." ) if index is None: index = slice(None) if columns is None: columns = slice(None) self._query_compiler = data.loc[index, columns]._query_compiler # Check type of data and use appropriate constructor elif query_compiler is None: distributed_frame = from_non_pandas(data, index, columns, dtype) if distributed_frame is not None: self._query_compiler = distributed_frame._query_compiler return warnings.warn( "Distributing {} object. This may take some time.".format(type(data)) ) if is_list_like(data) and not is_dict_like(data): old_dtype = getattr(data, "dtype", None) values = [ obj._to_pandas() if isinstance(obj, Series) else obj for obj in data ] if isinstance(data, np.ndarray): data = np.array(values, dtype=old_dtype) else: try: data = type(data)(values, dtype=old_dtype) except TypeError: data = values elif is_dict_like(data) and not isinstance( data, (pandas.Series, Series, pandas.DataFrame, DataFrame) ): data = { k: v._to_pandas() if isinstance(v, Series) else v for k, v in data.items() } pandas_df = pandas.DataFrame( data=data, index=index, columns=columns, dtype=dtype, copy=copy ) self._query_compiler = from_pandas(pandas_df)._query_compiler else: self._query_compiler = query_compiler def __repr__(self): from pandas.io.formats import console num_rows =
pandas.get_option("display.max_rows")
pandas.get_option
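# The completion above reads a display option inside __repr__; a minimal sketch of the
# pandas options API it relies on ("display.max_rows" is the real option key):
import pandas as pd

num_rows = pd.get_option("display.max_rows")      # read the current setting
with pd.option_context("display.max_rows", 10):   # temporary override
    assert pd.get_option("display.max_rows") == 10
assert pd.get_option("display.max_rows") == num_rows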
import numpy as np
import pandas as pd
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

train =
pd.read_csv("../input/train.csv", index_col=0)
pandas.read_csv
""" Prisma Inc. database.py Status: UNDER DEVELOPMENT for Major Update Ryzen Made by <NAME>. """ import pandas as pd import os import requests import progressbar import gc import pymongo import gridfs from pprint import pprint import json import certifi from sneakers.api.low import builder as bd from sneakers.api.low import threading as thr import base64 import bson from bson.binary import Binary from bson.json_util import dumps, loads ca = certifi.where() #--------- MONGO DB IMPLEMENTATION ----------------- snkclient = pymongo.MongoClient("mongodb+srv://Prismadevops:[email protected]/stockdotshopdb?retryWrites=true&w=majority", tlsCAFile=ca) snkdb=snkclient['stockdotshopdb'] snkcoll = snkdb['sneakers'] # Issue the serverStatus command and print the results def core_connection_snk(): serverStatusResult = snkdb.command("serverStatus") pprint(serverStatusResult) print(snkdb) return'8=======D' def load_database_ryzen(): sneakerscur = list(snkcoll.find({})) # Calling DataFrame constructor on list sneakers = pd.DataFrame(sneakerscur) return sneakers def search_query_database(query): myquery2 = {"$text" : {"$search": query}} resultcursor = list(snkcoll.find(myquery2).limit(100)) result =
pd.DataFrame(resultcursor)
pandas.DataFrame
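# The completion above turns a list of MongoDB documents into a frame; a sketch of
# pd.DataFrame on a list of dicts, with invented stand-ins for the sneaker records:
import pandas as pd

resultcursor = [
    {"_id": 1, "name": "Runner A", "size": 9},
    {"_id": 2, "name": "Runner B", "size": 10},
]
result = pd.DataFrame(resultcursor)  # dict keys become columns; missing keys -> NaN
print(result.shape)  # (2, 3)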
import copy import io import json import os import string from collections import OrderedDict from datetime import datetime from unittest import TestCase import numpy as np import pandas as pd import pytest import pytz from hypothesis import ( given, settings, ) from hypothesis.strategies import ( datetimes, integers, fixed_dictionaries, floats, just, lists, sampled_from, text, ) from pandas.testing import assert_frame_equal from tempfile import NamedTemporaryFile from oasislmf.utils.data import ( factorize_array, factorize_ndarray, fast_zip_arrays, get_dataframe, get_timestamp, get_utctimestamp, get_location_df, ) from oasislmf.utils.defaults import ( get_loc_dtypes, ) from oasislmf.utils.exceptions import OasisException def arrays_are_identical(expected, result): try: np.testing.assert_array_equal(expected, result) except AssertionError: raise return True class TestFactorizeArrays(TestCase): @settings(max_examples=10) @given( num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)), str_len=integers(min_value=2, max_value=100), num_strs=integers(min_value=10, max_value=100) ) def test_factorize_1darray(self, num_chars, str_len, num_strs): alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars) strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(num_strs)] expected_groups = list(OrderedDict({s: s for s in strings})) expected_enum = np.array([expected_groups.index(s) + 1 for s in strings]) result_enum, result_groups = factorize_array(strings) self.assertTrue(arrays_are_identical(expected_groups, result_groups)) self.assertTrue(arrays_are_identical(expected_enum, result_enum)) @settings(max_examples=1) @given( num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)), str_len=integers(min_value=2, max_value=100), rows=integers(min_value=10, max_value=100), cols=integers(min_value=10, max_value=100) ) def test_factorize_ndarray__no_row_or_col_indices_provided__raises_oasis_exception(self, num_chars, str_len, rows, cols): alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars) strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)] ndarr = np.random.choice(strings, (rows, cols)) with self.assertRaises(OasisException): factorize_ndarray(ndarr) @settings(max_examples=10, deadline=None) @given( num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)), str_len=integers(min_value=2, max_value=100), rows=integers(min_value=10, max_value=100), cols=integers(min_value=10, max_value=100), num_row_idxs=integers(min_value=2, max_value=10) ) def test_factorize_ndarray__by_row_idxs(self, num_chars, str_len, rows, cols, num_row_idxs): alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars) strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)] ndarr = np.random.choice(strings, (rows, cols)) row_idxs = np.random.choice(range(rows), num_row_idxs, replace=False).tolist() zipped = list(zip(*(ndarr[i, :] for i in row_idxs))) groups = list(OrderedDict({x: x for x in zipped})) expected_groups = np.empty(len(groups), dtype=object) expected_groups[:] = groups expected_enum = np.array([groups.index(x) + 1 for x in zipped]) result_enum, result_groups = factorize_ndarray(ndarr, row_idxs=row_idxs) self.assertTrue(arrays_are_identical(expected_groups, result_groups)) 
self.assertTrue(arrays_are_identical(expected_enum, result_enum)) @settings(max_examples=10, deadline=None) @given( num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)), str_len=integers(min_value=2, max_value=100), rows=integers(min_value=10, max_value=100), cols=integers(min_value=10, max_value=100), num_col_idxs=integers(min_value=2, max_value=10) ) def test_factorize_ndarray__by_col_idxs(self, num_chars, str_len, rows, cols, num_col_idxs): alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars) strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)] ndarr = np.random.choice(strings, (rows, cols)) col_idxs = np.random.choice(range(cols), num_col_idxs, replace=False).tolist() zipped = list(zip(*(ndarr[:, i] for i in col_idxs))) groups = list(OrderedDict({x: x for x in zipped})) expected_groups = np.empty(len(groups), dtype=object) expected_groups[:] = groups expected_enum = np.array([groups.index(x) + 1 for x in zipped]) result_enum, result_groups = factorize_ndarray(ndarr, col_idxs=col_idxs) self.assertTrue(arrays_are_identical(expected_groups, result_groups)) self.assertTrue(arrays_are_identical(expected_enum, result_enum)) class TestFastZipArrays(TestCase): @settings(max_examples=10) @given( array_len=integers(min_value=10, max_value=100), num_arrays=integers(2, 100) ) def test_fast_zip_arrays(self, array_len, num_arrays): arrays = np.random.randint(1, 10**6, (num_arrays, array_len)) li = list(zip(*arrays)) zipped = np.empty(len(li), dtype=object) zipped[:] = li result = fast_zip_arrays(*arrays) self.assertTrue(arrays_are_identical(zipped, result)) def dataframes_are_identical(df1, df2): try: assert_frame_equal(df1, df2) except AssertionError: return False return True class TestGetDataframe(TestCase): def test_get_dataframe__no_src_fp_or_buf_or_data_provided__oasis_exception_is_raised(self): with self.assertRaises(OasisException): get_dataframe(src_fp=None, src_buf=None, src_data=None) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file__use_default_options(self, data): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = df.copy(deep=True) result = get_dataframe(src_fp=fp.name) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'FloatCol': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols__use_default_options(self, data): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = df.copy(deep=True) expected.columns = expected.columns.str.lower() result = get_dataframe(src_fp=fp.name) self.assertTrue(dataframes_are_identical(result, expected)) finally: 
os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), dtypes=fixed_dictionaries({ 'int_col': sampled_from(['int32', 'int64']), 'float_col': sampled_from(['float32', 'float64']) }) ) def test_get_dataframe__from_csv_file__set_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) for col, dtype in dtypes.items(): df[col] = df[col].astype(dtype) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = pd.read_csv(fp.name, dtype=dtypes) result = get_dataframe(src_fp=fp.name, col_dtypes=dtypes) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'INT_COL': integers(min_value=1, max_value=10), 'FloatCol': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), dtypes=fixed_dictionaries({ 'INT_COL': sampled_from(['int32', 'int64']), 'FloatCol': sampled_from(['float32', 'float64']) }) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) for col, dtype in dtypes.items(): df[col] = df[col].astype(dtype) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = pd.read_csv(fp.name, dtype=dtypes) expected.columns = expected.columns.str.lower() result = get_dataframe(src_fp=fp.name, col_dtypes=dtypes) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given(empty_data_err_msg=text(min_size=1, max_size=10, alphabet=string.ascii_lowercase)) def test_get_dataframe__from_empty_csv_file__set_empty_data_err_msg_and_defaults_for_all_other_options__oasis_exception_is_raised_with_empty_data_err_msg(self, empty_data_err_msg): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame() df.to_csv(path_or_buf=fp) fp.close() with self.assertRaises(OasisException): try: get_dataframe(src_fp=fp.name, empty_data_error_msg=empty_data_err_msg) except OasisException as e: self.assertEqual(str(e), empty_data_err_msg) raise e finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), required=just( np.random.choice( ['str_col', 'int_col', 'float_col', 'bool_col', 'null_col'], np.random.choice(range(1, 6)), replace=False ).tolist() ) ) def test_get_dataframe__from_csv_file__set_required_cols_option_and_use_defaults_for_all_other_options(self, data, required): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = df.copy(deep=True) result = 
get_dataframe( src_fp=fp.name, required_cols=required ) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'FloatCol': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), required=just( np.random.choice( ['STR_COL', 'int_col', 'FloatCol', 'boolCol', 'null_col'], np.random.choice(range(1, 6)), replace=False ).tolist() ) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_required_cols_option_and_use_defaults_for_all_other_options(self, data, required): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = df.copy(deep=True) expected.columns = expected.columns.str.lower() result = get_dataframe( src_fp=fp.name, required_cols=required ) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), missing_cols=just( np.random.choice( ['str_col', 'int_col', 'float_col', 'bool_col', 'null_col'], np.random.choice(range(1, 5)), replace=False ).tolist() ) ) def test_get_dataframe__from_csv_file_missing_some_required_cols__set_required_cols_option_and_use_defaults_for_all_other_options__oasis_exception_is_raised(self, data, missing_cols): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) df.drop(missing_cols, axis=1).to_csv(path_or_buf=fp, encoding='utf-8', index=False) fp.close() with self.assertRaises(OasisException): get_dataframe( src_fp=fp.name, required_cols=df.columns.tolist() ) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'FloatCol': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), missing=just( np.random.choice( ['STR_COL', 'int_col', 'FloatCol', 'boolCol', 'null_col'], np.random.choice(range(1, 5)), replace=False ).tolist() ) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols_and_missing_some_required_cols__set_required_cols_option_and_use_defaults_for_all_other_options__oasis_exception_is_raised(self, data, missing): fp = NamedTemporaryFile('w', delete=False) try: df = pd.DataFrame(data) df.drop(missing, axis=1).to_csv(path_or_buf=fp, encoding='utf-8', index=False) fp.close() with self.assertRaises(OasisException): get_dataframe( src_fp=fp.name, required_cols=df.columns.tolist() ) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), defaults=fixed_dictionaries({ 'str_col': text(min_size=1, 
max_size=10, alphabet=string.ascii_uppercase), 'int_col': integers(min_value=0, max_value=10), 'float_col': floats(min_value=1.0, allow_infinity=False) }) ) def test_get_dataframe__from_csv_file__set_col_defaults_option_and_use_defaults_for_all_other_options(self, data, defaults): fp = NamedTemporaryFile("w", delete=False) try: df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = df.copy(deep=True) for col, default in defaults.items(): expected.loc[:, col].fillna(defaults[col], inplace=True) result = get_dataframe(src_fp=fp.name, col_defaults=defaults) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'FloatCol': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ), defaults=fixed_dictionaries({ 'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_uppercase), 'int_col': integers(min_value=0, max_value=10), 'FloatCol': floats(min_value=1.0, allow_infinity=False) }) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_col_defaults_option_and_use_defaults_for_all_other_options(self, data, defaults): fp = NamedTemporaryFile("w", delete=False) try: df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() expected = df.copy(deep=True) expected.columns = expected.columns.str.lower() for col, default in defaults.items(): expected.loc[:, col.lower()].fillna(defaults[col], inplace=True) result = get_dataframe(src_fp=fp.name, col_defaults=defaults) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=10, max_size=15, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file_with_nulls_in_some_columns__set_non_na_cols_option_and_use_defaults_for_all_other_options(self, data): fp = NamedTemporaryFile('w', delete=False) try: data[-1]['int_col'] = np.nan data[-2]['str_col'] = np.nan df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() non_na_cols = ['int_col', 'str_col'] expected = df.dropna(subset=non_na_cols, axis=0) result = get_dataframe(src_fp=fp.name, non_na_cols=non_na_cols) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'FloatCol': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols_and_nulls_in_some_columns__set_non_na_cols_option_and_use_defaults_for_all_other_options(self, data): fp = NamedTemporaryFile("w", delete=False) try: data[-1]['int_col'] = np.nan data[-2]['STR_COL'] = np.nan df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() non_na_cols = ['int_col', 'STR_COL'] 
expected = df.dropna(subset=non_na_cols, axis=0) expected.columns = expected.columns.str.lower() result = get_dataframe(src_fp=fp.name, non_na_cols=non_na_cols) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file__set_sort_cols_option_on_single_col_and_use_defaults_for_all_other_options(self, data): fp = NamedTemporaryFile("w", delete=False) try: data = [{k: (v if k != 'int_col' else np.random.choice(range(10))) for k, v in it.items()} for it in data] df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() sort_cols = ['int_col'] expected = df.sort_values(sort_cols, axis=0) result = get_dataframe(src_fp=fp.name, sort_cols=sort_cols) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'IntCol': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'boolCol': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_sort_cols_option_on_single_col_and_use_defaults_for_all_other_options(self, data): fp = NamedTemporaryFile("w", delete=False) try: data = [{k: (v if k != 'IntCol' else np.random.choice(range(10))) for k, v in it.items()} for it in data] df = pd.DataFrame(data) df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False) fp.close() sort_cols = ['IntCol'] expected = df.sort_values(sort_cols, axis=0) expected.columns = expected.columns.str.lower() result = get_dataframe(src_fp=fp.name, sort_cols=sort_cols) self.assertTrue(dataframes_are_identical(result, expected)) finally: os.remove(fp.name) @settings(max_examples=10) @given( data=lists( fixed_dictionaries({ 'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase), 'int_col': integers(min_value=1, max_value=10), 'float_col': floats(min_value=0.0, max_value=10.0), 'bool_col': sampled_from([True, False]), 'null_col': just(np.nan) }), min_size=10, max_size=10 ) ) def test_get_dataframe__from_csv_file__set_sort_cols_option_on_two_cols_and_use_defaults_for_all_other_options(self, data): fp = NamedTemporaryFile("w", delete=False) try: data = [ {k: (v if k not in ('int_col', 'str_col') else (np.random.choice(range(10)) if k == 'int_col' else np.random.choice(list(string.ascii_lowercase)))) for k, v in it.items()} for it in data ] df =
pd.DataFrame(data)
pandas.DataFrame
#%%
import sys
sys.path.append('..')
from network_evaluation_tools import data_import_tools as dit
from network_evaluation_tools import network_evaluation_functions as nef
from network_evaluation_tools import network_propagation as prop
import pandas as pd
import numpy as np
import pickle
import os

#%%
# Load network (We choose a smaller network here for the example's sake)
network = dit.load_network_file('../Data/string_edge_list_common_names.tsv', verbose=True, delimiter='\t')
print(len(network.nodes))

#%%
# Load gene sets for analysis
genesets = dit.load_node_sets('../Data/DisGeNET_genesets.txt')

#%%
# Calculate geneset sub-sample rate
genesets_p = nef.calculate_p(network, genesets)

#%%
# Determine optimal alpha for network (can also be done automatically by next step)
alpha = prop.calculate_alpha(network)
print(alpha)

#%%
import networkx as nx
print(len(network.nodes))
subs = list(nx.connected_component_subgraphs(network))
print(subs)

#%%
# Calculate network kernel for propagation
kernel = nef.construct_prop_kernel(network, alpha=alpha, verbose=True)

#%%
print(kernel.index)
print(genesets)

#%%
# Calculate the AUPRC values for each gene set
AUPRC_values = nef.small_network_AUPRC_wrapper(kernel, genesets, genesets_p, n=30, cores=4, verbose=True)

#%% md
#**Note about the above cell:** There are several options for this step, depending on the computational resources available and the network size. If the network is sufficiently small (<250k edges), it is recommended to use the 'small_network_AUPRC_wrapper' function, as it can be much faster, especially when run in parallel (at least 8G per core is recommended). If you would like to parallelize the AUPRC calculation for a larger network (between 250k and 2.5M edges), at least 16G per core is recommended, and 32G per core if the network contains more than 2.5M edges. For larger networks it is recommended to use 'large_network_AUPRC_wrapper', which may be a slightly slower function but is better equipped to handle the larger memory footprint required. To change the parallelization of the function, set the 'cores' option to the number of threads you would like to use (a sketch of such a call is given further below).

#%%
# Construct null networks and calculate the AUPRC of the gene sets of the null networks
# We can use the AUPRC wrapper function for this
if os.path.exists('null_AUPRCs.pickle'):
    null_AUPRCs = pickle.load(open('null_AUPRCs.pickle','rb'))
else:
    null_AUPRCs = []
    for i in range(10):
        shuffNet = nef.shuffle_network(network, max_tries_n=10, verbose=True)
        shuffNet_kernel = nef.construct_prop_kernel(shuffNet, alpha=alpha, verbose=False)
        shuffNet_AUPRCs = nef.small_network_AUPRC_wrapper(shuffNet_kernel, genesets, genesets_p, n=30, cores=4, verbose=False)
        null_AUPRCs.append(shuffNet_AUPRCs)
        print('shuffNet', repr(i+1), 'AUPRCs calculated')
    pickle.dump(null_AUPRCs, open('null_AUPRCs.pickle','wb'))

#%% md
#**Note about the above cell:** We use a small number of shuffled networks to calculate the null AUPRC values in this example, but a larger number may give a better representation of the true null AUPRC distribution, especially if the distribution of null AUPRCs has a high variance relative to the actual AUPRC values. In practice, we have found that the variance remains relatively small even with a small number of shuffled networks.
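#%% md
#A minimal sketch of the large-network call mentioned in the note above. It assumes that 'large_network_AUPRC_wrapper' accepts the same arguments as 'small_network_AUPRC_wrapper' (that signature is an assumption here, not verified), so the call is kept behind a flag.

#%%
# Hypothetical parallel AUPRC calculation for a network with >250k edges (assumed signature)
use_large_network_wrapper = False
if use_large_network_wrapper:
    AUPRC_values = nef.large_network_AUPRC_wrapper(kernel, genesets, genesets_p, n=30, cores=8, verbose=True)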
#%%
# Construct table of null AUPRCs
if os.path.exists('null_AUPRCs_table.pickle'):
    null_AUPRCs_table = pickle.load(open('null_AUPRCs_table.pickle','rb'))
else:
    null_AUPRCs_table = pd.concat(null_AUPRCs, axis=1)
    null_AUPRCs_table.columns = ['shuffNet'+repr(i+1) for i in range(len(null_AUPRCs))]
    pickle.dump(null_AUPRCs_table, open('null_AUPRCs_table.pickle','wb'))

#%%
# Calculate performance metric of gene sets
if os.path.exists('network_performance.pickle'):
    network_performance = pickle.load(open('network_performance.pickle','rb'))
else:
    network_performance = nef.calculate_network_performance_score(AUPRC_values, null_AUPRCs_table, verbose=True)
    network_performance.name = 'Test Network'
    pickle.dump(network_performance, open('network_performance.pickle','wb'))

#%%
# Calculate network performance gain over median null AUPRC
if os.path.exists('network_perf_gain.pickle'):
    network_perf_gain = pickle.load(open('network_perf_gain.pickle','rb'))
else:
    network_perf_gain = nef.calculate_network_performance_gain(AUPRC_values, null_AUPRCs_table, verbose=True)
    network_perf_gain.name = 'Test Network'
    pickle.dump(network_perf_gain, open('network_perf_gain.pickle','wb'))

#%%
# Rank network on average performance across gene sets vs performance on same gene sets in previous network set
if os.path.exists('all_network_performance.pickle'):
    all_network_performance = pickle.load(open('all_network_performance.pickle','rb'))
    all_network_performance_filt = pickle.load(open('all_network_performance_filt.pickle','rb'))
    network_performance_rank_table = pickle.load(open('network_performance_rank_table.pickle','rb'))
    network_performance_rankings = pickle.load(open('network_performance_rankings.pickle','rb'))
else:
    all_network_performance = pd.read_csv('~/Data/Network_Performance.csv', index_col=0)
    all_network_performance_filt = pd.concat([network_performance, all_network_performance.loc[network_performance.index]], axis=1)
    network_performance_rank_table = all_network_performance_filt.rank(axis=1, ascending=False)
    network_performance_rankings = network_performance_rank_table['Test Network']
    pickle.dump(all_network_performance, open('all_network_performance.pickle','wb'))
    pickle.dump(all_network_performance_filt, open('all_network_performance_filt.pickle','wb'))
    pickle.dump(network_performance_rank_table, open('network_performance_rank_table.pickle','wb'))
    pickle.dump(network_performance_rankings, open('network_performance_rankings.pickle','wb'))

#%%
# Rank network on average performance gain across gene sets vs performance gain on same gene sets in previous network set
all_network_perf_gain = pd.read_csv('~/Data/Network_Performance_Gain.csv', index_col=0)
all_network_perf_gain_filt = pd.concat([network_perf_gain, all_network_perf_gain.loc[network_perf_gain.index]], axis=1)
network_perf_gain_rank_table = all_network_perf_gain_filt.rank(axis=1, ascending=False)
network_perf_gain_rankings = network_perf_gain_rank_table['Test Network']

#%%
# Network Performance
network_performance_metric_ranks =
pd.concat([network_performance, network_performance_rankings, network_perf_gain, network_perf_gain_rankings], axis=1)
pandas.concat
''' Reads in literature metallicities and makes new Fe/H basis ''' import pickle import sys import pandas as pd import numpy as np import matplotlib.pyplot as plt from astroquery.simbad import Simbad from . import * class LitFehRaw(): ''' Read in Fe/H values from the literature, before making any transformations ''' def __init__(self): # map the raw data to object # source_dir=config_red["data_dirs"]["DIR_LIT_HIGH_RES_FEH"]): source_dir = "/Users/bandari/Documents/git.repos/rrlfe/src/high_res_feh/" # stand-in that consists of our program star names self.df_our_program_stars = pd.read_csv(source_dir + "our_program_stars_names_only.csv") # Fe/H from Layden+ 1994; this may serve as the common basis for RRabs self.df_layden_feh = pd.read_csv(source_dir + "layden_1994_abundances.dat") # RES: "rather low" # Fe/H Clementini+ 1995 self.df_clementini_feh = pd.read_csv(source_dir + "clementini_1995_abundances.dat") # Fe/H Fernley+ 1996 self.df_fernley96_feh = pd.read_csv(source_dir + "fernley_1996_abundances.dat") # RES: 60,000, FeI & FeII, 5900-8100 A # Fe/H from Fernley+ 1997 self.df_fernley97_feh = pd.read_csv(source_dir + "fernley_1997_abundances.dat") # RES: 60,000, two FeII lines, 5900-8100 A # log(eps) from Lambert+ 1996 self.df_lambert_logeps = pd.read_csv(source_dir + "lambert_1996_abundances.dat") # RES: ~23,000, FeII + photometric models, 3600-9000 A # Fe/H from Wallerstein and Huang 2010, arXiv 1004.2017 self.df_wallerstein_feh = pd.read_csv(source_dir + "wallerstein_huang_2010_abundances.dat") # RES: ~30,000, FeII # Fe/H from Chadid+ 2017 ApJ 835.2:187 (FeI and II lines) self.df_chadid_feh = pd.read_csv(source_dir + "chadid_2017_abundances.dat") # RES: 38000, FeI & FeII, 3400-9900 A # Fe/H from Liu+ 2013 Res Ast Astroph 13:1307 self.df_liu_feh = pd.read_csv(source_dir + "liu_2013_abundances.dat") # RES: ~60,000, FeI (& FeII?), 5100-6400 A # Fe/H from Nemec+ 2013 self.df_nemec_feh = pd.read_csv(source_dir + "nemec_2013_abundances.dat") # RES: ~65,000 or 36,000, FeI & FeII, 5150-5200 A # Fe/H from Solano+ 1997 self.df_solano_feh = pd.read_csv(source_dir + "solano_1997_abundances.dat") # RES: 22,000 & 19,000, strong FeI lines, 4160-4390 & 4070-4490 A # Fe/H from Pancino+ 2015 MNRAS 447:2404 self.df_pancino_feh = pd.read_csv(source_dir + "pancino_2015_abundances.dat") # RES: >30,000, FeI (weighted average), 4000-8500 A # Fe/H from Sneden+ 2017 self.df_sneden_feh = pd.read_csv(source_dir + "sneden_2017_abundances.dat", delimiter="|") # RES: ~27,000 (at 5000 A), FeI & FeII, 3400-9000 A # Fe/H from Crestani+ 2021 self.df_crestani_feh = pd.read_csv(source_dir + "crestani_2021_abundances.dat", delimiter=",") # Fe/H from Kemper+ 1982; this might serve as the common basis for RRcs self.df_kemper_feh = pd.read_csv(source_dir + "kemper_1982_abundances.dat") # Fe/H from Govea+ 2014 ## ## note: Govea+ has abundances for each phase value, and this ## ## includes NLTE phases; how to get single Fe/H? 
self.df_govea_feh = pd.read_csv(source_dir + "govea_2014_abundances.dat") def map_names(df_pass): # find common ASAS names import ipdb; ipdb.set_trace() # treat each lit source individually to get single Fe/H and error # loop over rows, parse as necessary for row_num in range(0,len(df_pass)): name_initial = df_pass["name"] def matchmaker(basis_table_pass, input_table_pass): ''' Find what stars are common to two input tables, return arrays of FeHs, fit best-fit line INPUTS: input_table: table I'm interested in checking for overlapping stars (pandas dataframe with col ["name_match"]: star name; col ["feh_single"]: Fe/H) basis_table: table with the names for which I am looking for repeats in the other table (pandas dataframe with col ["name_match"]: star name; col ["feh_single"]: Fe/H) OUTPUTS: pandas dataframe with 1. overlapping star names 2. FeHs from the input_table 3. FeHs from the basis_table 4. residuals in FeH: FeH_input - FeH_basis 5. string indicating the high-res dataset being matched ''' basis_table = basis_table_pass.copy(deep=True) input_table = input_table_pass.copy(deep=True) # make all strings lowercase, to make case insensitive match basis_table['name_match'] = basis_table['name_match'].str.lower() input_table['name_match'] = input_table['name_match'].str.lower() merged_table = basis_table.merge(input_table, how="inner", on="name_match", suffixes=("_basis", "_lit")) return merged_table def find_offsets(match_pass): ''' Finds the offsets that need to be added in to overlap datasets as per Crestani+ 2017 Fig. 6 INPUTS: match_pass: the dataframe holding matched Fe/Hs OUTPUTS: y_offset_2_lit: the constant offset that needs to be added back in ''' # find offset between (lit vs. Layden) residuals and Chadid+ 2017 at Fe/H=-1.25 (see their Fig. 6) chadid_y_125 = -0.10583621694962 # from Chadid line at Fe/H=-1.25 feh_basis_loc = -1.25 # corresponding x- value (Fe/H in the basis dataset) m_lit, b_lit = np.polyfit(match_pass["feh_single_basis"],np.subtract(match_pass["feh_single_lit"],match_pass["feh_single_basis"]),1) # find offset between residuals and zero y_offset_lit = m_lit*feh_basis_loc + b_lit # offset between the offsets ("offset_2"); this is what needs to be added in # to bring it in line with lit y_offset_2_lit = chadid_y_125 - y_offset_lit return y_offset_2_lit def main(): # read in raw raws = LitFehRaw() # make transformations to get single Fe/H value # convert outputs in astropy.Table format to DataFrames df_our_stars = pd.DataFrame(raws.df_our_program_stars) df_govea = pd.DataFrame(raws.df_govea_feh) #avg df_layden = pd.DataFrame(raws.df_layden_feh) # simple df_clementini =
pd.DataFrame(raws.df_clementini_feh)
pandas.DataFrame
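A minimal usage sketch (not part of the original module) of how the matchmaker and find_offsets helpers defined above might be chained, assuming the module's imports (pandas as pd, numpy as np) and both helpers are in scope. The star names and Fe/H values below are made-up illustrative data; only the column names follow the docstrings.

# Hypothetical matched tables: 'name_match' holds star names, 'feh_single' a single Fe/H value
_basis_demo = pd.DataFrame({"name_match": ["RR Lyr", "X Ari", "SU Dra"],
                            "feh_single": [-1.39, -2.43, -1.80]})
_lit_demo = pd.DataFrame({"name_match": ["rr lyr", "x ari", "su dra"],
                          "feh_single": [-1.25, -2.30, -1.70]})

_matched_demo = matchmaker(_basis_demo, _lit_demo)   # inner join on lower-cased names, suffixes _basis/_lit
_offset_demo = find_offsets(_matched_demo)           # constant offset that aligns residuals with Chadid+ 2017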
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import itertools import pytest import numpy as np import pandas as pd from pandas.util.testing import assert_frame_equal from reco_utils.common.constants import DEFAULT_PREDICTION_COL from reco_utils.recommender.sar.sar_singlenode import SARSingleNode from reco_utils.recommender.sar import TIME_NOW from tests.sar_common import read_matrix, load_userpred, load_affinity def _rearrange_to_test(array, row_ids, col_ids, row_map, col_map): """Rearranges SAR array into test array order""" if row_ids is not None: row_index = [row_map[x] for x in row_ids] array = array[row_index, :] if col_ids is not None: col_index = [col_map[x] for x in col_ids] array = array[:, col_index] return array def test_init(header): model = SARSingleNode(remove_seen=True, similarity_type="jaccard", **header) assert model.col_user == "UserId" assert model.col_item == "MovieId" assert model.col_rating == "Rating" # TODO: add more parameters @pytest.mark.parametrize( "similarity_type, timedecay_formula", [("jaccard", False), ("lift", True)] ) def test_fit(similarity_type, timedecay_formula, train_test_dummy_timestamp, header): model = SARSingleNode( similarity_type=similarity_type, timedecay_formula=timedecay_formula, **header ) trainset, testset = train_test_dummy_timestamp model.fit(trainset) @pytest.mark.parametrize( "similarity_type, timedecay_formula", [("jaccard", False), ("lift", True)] ) def test_predict( similarity_type, timedecay_formula, train_test_dummy_timestamp, header ): model = SARSingleNode( similarity_type=similarity_type, timedecay_formula=timedecay_formula, **header ) trainset, testset = train_test_dummy_timestamp model.fit(trainset) preds = model.predict(testset) assert len(preds) == 2 assert isinstance(preds, pd.DataFrame) assert preds[header["col_user"]].dtype == trainset[header["col_user"]].dtype assert preds[header["col_item"]].dtype == trainset[header["col_item"]].dtype assert preds[DEFAULT_PREDICTION_COL].dtype == trainset[header["col_rating"]].dtype def test_predict_all_items(train_test_dummy_timestamp, header): model = SARSingleNode(**header) trainset, _ = train_test_dummy_timestamp model.fit(trainset) user_items = itertools.product( trainset[header["col_user"]].unique(), trainset[header["col_item"]].unique() ) testset = pd.DataFrame(user_items, columns=[header["col_user"], header["col_item"]]) preds = model.predict(testset) assert len(preds) == len(testset) assert isinstance(preds, pd.DataFrame) assert preds[header["col_user"]].dtype == trainset[header["col_user"]].dtype assert preds[header["col_item"]].dtype == trainset[header["col_item"]].dtype assert preds[DEFAULT_PREDICTION_COL].dtype == trainset[header["col_rating"]].dtype @pytest.mark.parametrize( "threshold,similarity_type,file", [ (1, "cooccurrence", "count"), (1, "jaccard", "jac"), (1, "lift", "lift"), (3, "cooccurrence", "count"), (3, "jaccard", "jac"), (3, "lift", "lift"), ], ) def test_sar_item_similarity( threshold, similarity_type, file, demo_usage_data, sar_settings, header ): model = SARSingleNode( similarity_type=similarity_type, timedecay_formula=False, time_decay_coefficient=30, time_now=TIME_NOW, threshold=threshold, **header ) model.fit(demo_usage_data) true_item_similarity, row_ids, col_ids = read_matrix( sar_settings["FILE_DIR"] + "sim_" + file + str(threshold) + ".csv" ) if similarity_type is "cooccurrence": test_item_similarity = _rearrange_to_test( model.item_similarity.todense(), row_ids, col_ids, model.item2index, 
model.item2index, ) assert np.array_equal( true_item_similarity.astype(test_item_similarity.dtype), test_item_similarity, ) else: test_item_similarity = _rearrange_to_test( model.item_similarity, row_ids, col_ids, model.item2index, model.item2index ) assert np.allclose( true_item_similarity.astype(test_item_similarity.dtype), test_item_similarity, atol=sar_settings["ATOL"], ) def test_user_affinity(demo_usage_data, sar_settings, header): time_now = demo_usage_data[header["col_timestamp"]].max() model = SARSingleNode( similarity_type="cooccurrence", timedecay_formula=True, time_decay_coefficient=30, time_now=time_now, **header ) model.fit(demo_usage_data) true_user_affinity, items = load_affinity(sar_settings["FILE_DIR"] + "user_aff.csv") user_index = model.user2index[sar_settings["TEST_USER_ID"]] sar_user_affinity = np.reshape( np.array( _rearrange_to_test( model.user_affinity, None, items, None, model.item2index )[user_index,].todense() ), -1, ) assert np.allclose( true_user_affinity.astype(sar_user_affinity.dtype), sar_user_affinity, atol=sar_settings["ATOL"], ) @pytest.mark.parametrize( "threshold,similarity_type,file", [(3, "cooccurrence", "count"), (3, "jaccard", "jac"), (3, "lift", "lift")], ) def test_recommend_k_items( threshold, similarity_type, file, header, sar_settings, demo_usage_data ): time_now = demo_usage_data[header["col_timestamp"]].max() model = SARSingleNode( similarity_type=similarity_type, timedecay_formula=True, time_decay_coefficient=30, time_now=time_now, threshold=threshold, **header ) model.fit(demo_usage_data) true_items, true_scores = load_userpred( sar_settings["FILE_DIR"] + "userpred_" + file + str(threshold) + "_userid_only.csv" ) test_results = model.recommend_k_items( demo_usage_data[ demo_usage_data[header["col_user"]] == sar_settings["TEST_USER_ID"] ], top_k=10, sort_top_k=True, remove_seen=True, ) test_items = list(test_results[header["col_item"]]) test_scores = np.array(test_results["prediction"]) assert true_items == test_items assert np.allclose(true_scores, test_scores, atol=sar_settings["ATOL"]) def test_get_item_based_topk(header, pandas_dummy): sar = SARSingleNode(**header) sar.fit(pandas_dummy) # test with just items provided expected = pd.DataFrame( dict(UserId=[0, 0, 0], MovieId=[8, 7, 6], prediction=[2.0, 2.0, 2.0]) ) items = pd.DataFrame({header["col_item"]: [1, 5, 10]}) actual = sar.get_item_based_topk(items, top_k=3) assert_frame_equal(expected, actual) # test with items and users expected = pd.DataFrame( dict( UserId=[100, 100, 100, 1, 1, 1], MovieId=[8, 7, 6, 4, 3, 10], prediction=[2.0, 2.0, 2.0, 2.0, 2.0, 1.0], ) ) items = pd.DataFrame( { header["col_user"]: [100, 100, 1, 100, 1, 1], header["col_item"]: [1, 5, 1, 10, 2, 6], } ) actual = sar.get_item_based_topk(items, top_k=3, sort_top_k=True) assert_frame_equal(expected, actual) # test with items, users, and ratings expected = pd.DataFrame( dict( UserId=[100, 100, 100, 1, 1, 1], MovieId=[2, 4, 3, 4, 3, 10], prediction=[5.0, 5.0, 5.0, 8.0, 8.0, 4.0], ) ).set_index(['UserId', 'MovieId']) items = pd.DataFrame( { header["col_user"]: [100, 100, 1, 100, 1, 1], header["col_item"]: [1, 5, 1, 10, 2, 6], header["col_rating"]: [5, 1, 3, 1, 5, 4], } ) actual = sar.get_item_based_topk(items, top_k=3).set_index(['UserId', 'MovieId'])
assert_frame_equal(expected, actual, check_like=True)
pandas.util.testing.assert_frame_equal
import logging
import requests
import time
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options


def get_youtube_data():
    options = webdriver.ChromeOptions()
    youtube_url = "https://schedule.hololive.tv/lives"  # display the data as small circular character icons, based on the schedule page

    # hide the browser window (headless mode)
    options.add_argument("headless")

    # use a proxy
    PROXY_ADDRESS = "172.16.31.10:9999"  # IP:Port - change as needed
    webdriver.DesiredCapabilities.CHROME['proxy'] = {
        "httpProxy": PROXY_ADDRESS,
        "ftpProxy": PROXY_ADDRESS,
        "sslProxy": PROXY_ADDRESS,
        "proxyType": "MANUAL"
    }

    # set up the Excel file that will store the crawled content
    writer = pd.ExcelWriter('airpage.xlsx', engine='openpyxl')

    # set up the driver (the path to the chromedriver must be specified exactly)
    browser = webdriver.Chrome("./chromedriver.exe", options=options)
    delay = 1
    browser.implicitly_wait(delay)

    target_url = youtube_url + "/c/dlwlrma/featured"  # iu
    browser.get(target_url)

    # maximize the browser window
    browser.maximize_window()
    browser.implicitly_wait(delay)

    # click the "Videos" tab
    browser.implicitly_wait(delay)
    browser.find_element_by_xpath('//*[@id="tabsContent"]/tp-yt-paper-tab[2]').click()

    body = browser.find_element_by_tag_name('body')

    # scroll down a long way (a large number if possible) to load the body content
    scroll_count = 250
    while scroll_count:
        body.send_keys(Keys.PAGE_DOWN)
        browser.implicitly_wait(delay)
        scroll_count -= 1

    html0 = browser.page_source
    html = BeautifulSoup(html0, 'html.parser')

    # collect the list of videos
    video_datas = html.find_all('ytd-grid-video-renderer', {'class': 'style-scope ytd-grid-renderer'})

    video_url_list = []
    for i in range(len(video_datas)):
        url = youtube_url + video_datas[i].find('a', {'id': 'thumbnail'})['href']
        video_url_list.append(url)

    dataframe = pd.DataFrame({
        'name': [],
        'view_count': [],
        'youtube_url': [],
        'date': [],
        'desc': []
    })

    for i in range(3):  # for i in range(len(video_datas)):
        name = video_datas[i].find('a', {'id': 'video-title'}).text
        url = youtube_url + video_datas[i].find('a', {'id': 'thumbnail'})['href']
        for_view_count = video_datas[i].find('div', {'id': 'metadata-line'})
        view_count = for_view_count.find_all('span', {'class': 'style-scope ytd-grid-video-renderer'})[0].text.split()[1]

        cur_url = video_url_list[i]
        browser.get(cur_url)
        time.sleep(5)

        body = browser.find_element_by_tag_name('body')
        body.send_keys(Keys.PAGE_DOWN)
        time.sleep(5)

        html0 = browser.page_source
        html = BeautifulSoup(html0, 'html.parser')

        moreBtn = browser.find_element_by_xpath('//*[@class="more-button style-scope ytd-video-secondary-info-renderer"]')
        try:
            if moreBtn is not None:
                moreBtn.click()
        except:
            pass

        r_date = html.find('div', {'id': 'info-strings'}).find('yt-formatted-string').text
        desc = html.find_all('yt-formatted-string', {'class': 'content style-scope ytd-video-secondary-info-renderer'})[0].text

        insert_data =
pd.DataFrame({ 'name':[name], 'view_count':[view_count], 'youtube_url':[url], 'date':[r_date], 'desc' : [desc] })
pandas.DataFrame
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.version import LooseVersion import inspect import numpy as np import pandas as pd import pyspark import databricks.koalas as ks from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, _MissingPandasLikeMultiIndex from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils class IndexesTest(ReusedSQLTestCase, TestUtils): @property def pdf(self): return pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6, 7, 8, 9], 'b': [4, 5, 6, 3, 2, 1, 0, 0, 0], }, index=[0, 1, 3, 5, 6, 8, 9, 9, 9]) @property def kdf(self): return ks.from_pandas(self.pdf) def test_index(self): for pdf in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')), pd.DataFrame(np.random.randn(10, 5), index=pd.date_range('2011-01-01', freq='D', periods=10)), pd.DataFrame(np.random.randn(10, 5), columns=list('abcde')).set_index(['a', 'b'])]: kdf = ks.from_pandas(pdf) self.assert_eq(kdf.index, pdf.index) def test_index_getattr(self): kidx = self.kdf.index item = 'databricks' expected_error_message = ("'Index' object has no attribute '{}'".format(item)) with self.assertRaisesRegex(AttributeError, expected_error_message): kidx.__getattr__(item) def test_multi_index_getattr(self): arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) kidx = kdf.index item = 'databricks' expected_error_message = ("'MultiIndex' object has no attribute '{}'".format(item)) with self.assertRaisesRegex(AttributeError, expected_error_message): kidx.__getattr__(item) def test_to_series(self): pidx = self.pdf.index kidx = self.kdf.index self.assert_eq(kidx.to_series(), pidx.to_series()) self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a')) # FIXME: the index values are not addressed the change. (#1190) # self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series()) pidx = self.pdf.set_index('b', append=True).index kidx = self.kdf.set_index('b', append=True).index with self.sql_conf({'spark.sql.execution.arrow.enabled': False}): self.assert_eq(kidx.to_series(), pidx.to_series()) self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a')) def test_to_frame(self): pidx = self.pdf.index kidx = self.kdf.index self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame())) self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False))) pidx.name = 'a' kidx.name = 'a' self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame())) self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False))) if LooseVersion(pd.__version__) >= LooseVersion('0.24'): # The `name` argument is added in pandas 0.24. 
self.assert_eq(repr(kidx.to_frame(name='x')), repr(pidx.to_frame(name='x'))) self.assert_eq(repr(kidx.to_frame(index=False, name='x')), repr(pidx.to_frame(index=False, name='x'))) pidx = self.pdf.set_index('b', append=True).index kidx = self.kdf.set_index('b', append=True).index self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame())) self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False))) if LooseVersion(pd.__version__) >= LooseVersion('0.24'): # The `name` argument is added in pandas 0.24. self.assert_eq(repr(kidx.to_frame(name=['x', 'y'])), repr(pidx.to_frame(name=['x', 'y']))) self.assert_eq(repr(kidx.to_frame(index=False, name=['x', 'y'])), repr(pidx.to_frame(index=False, name=['x', 'y']))) def test_index_names(self): kdf = self.kdf self.assertIsNone(kdf.index.name) idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x') pdf = pd.DataFrame(np.random.randn(10, 5), idx) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.name, pdf.index.name) self.assertEqual(kdf.index.names, pdf.index.names) pidx = pdf.index kidx = kdf.index pidx.name = 'renamed' kidx.name = 'renamed' self.assertEqual(kidx.name, pidx.name) self.assertEqual(kidx.names, pidx.names) self.assert_eq(kidx, pidx) pidx.name = None kidx.name = None self.assertEqual(kidx.name, pidx.name) self.assertEqual(kidx.names, pidx.names) self.assert_eq(kidx, pidx) with self.assertRaisesRegex(ValueError, "Names must be a list-like"): kidx.names = 'hi' expected_error_message = ("Length of new names must be {}, got {}" .format(len(kdf._internal.index_map), len(['0', '1']))) with self.assertRaisesRegex(ValueError, expected_error_message): kidx.names = ['0', '1'] def test_multi_index_names(self): arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.names, pdf.index.names) pidx = pdf.index kidx = kdf.index pidx.names = ['renamed_number', 'renamed_color'] kidx.names = ['renamed_number', 'renamed_color'] self.assertEqual(kidx.names, pidx.names) pidx.names = ['renamed_number', None] kidx.names = ['renamed_number', None] self.assertEqual(kidx.names, pidx.names) if LooseVersion(pyspark.__version__) < LooseVersion('2.4'): # PySpark < 2.4 does not support struct type with arrow enabled. 
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}): self.assert_eq(kidx, pidx) else: self.assert_eq(kidx, pidx) with self.assertRaises(PandasNotImplementedError): kidx.name with self.assertRaises(PandasNotImplementedError): kidx.name = 'renamed' def test_index_rename(self): pdf = pd.DataFrame(np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')) kdf = ks.from_pandas(pdf) pidx = pdf.index kidx = kdf.index self.assert_eq(kidx.rename('y'), pidx.rename('y')) self.assert_eq(kdf.index.names, pdf.index.names) kidx.rename('z', inplace=True) pidx.rename('z', inplace=True) self.assert_eq(kidx, pidx) self.assert_eq(kdf.index.names, pdf.index.names) self.assert_eq(kidx.rename(None), pidx.rename(None)) self.assert_eq(kdf.index.names, pdf.index.names) def test_multi_index_rename(self): arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) pmidx = pdf.index kmidx = kdf.index self.assert_eq(kmidx.rename(['n', 'c']), pmidx.rename(['n', 'c'])) self.assert_eq(kdf.index.names, pdf.index.names) kmidx.rename(['num', 'col'], inplace=True) pmidx.rename(['num', 'col'], inplace=True) self.assert_eq(kmidx, pmidx) self.assert_eq(kdf.index.names, pdf.index.names) self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None])) self.assert_eq(kdf.index.names, pdf.index.names) self.assertRaises(TypeError, lambda: kmidx.rename('number')) self.assertRaises(ValueError, lambda: kmidx.rename(['number'])) def test_multi_index_levshape(self): pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)]) kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)]) self.assertEqual(pidx.levshape, kidx.levshape) def test_index_unique(self): kidx = self.kdf.index # here the output is different than pandas in terms of order expected = [0, 1, 3, 5, 6, 8, 9] self.assert_eq(expected, sorted(kidx.unique().to_pandas())) self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas())) expected = [1, 2, 4, 6, 7, 9, 10] self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas())) with self.assertRaisesRegexp(IndexError, "Too many levels*"): kidx.unique(level=1) with self.assertRaisesRegexp(KeyError, "Requested level (hi)*"): kidx.unique(level='hi') def test_multi_index_copy(self): arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.index.copy(), pdf.index.copy()) def test_index_symmetric_difference(self): idx = ks.Index(['a', 'b', 'c']) midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) with self.assertRaisesRegexp(NotImplementedError, "Doesn't support*"): idx.symmetric_difference(midx) def test_multi_index_symmetric_difference(self): idx = ks.Index(['a', 'b', 'c']) midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) midx_ = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) self.assert_eq( midx.symmetric_difference(midx_), midx.to_pandas().symmetric_difference(midx_.to_pandas())) with self.assertRaisesRegexp(NotImplementedError, "Doesn't support*"): midx.symmetric_difference(idx) def test_missing(self): kdf = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}) # Index functions missing_functions = inspect.getmembers(_MissingPandasLikeIndex, inspect.isfunction) unsupported_functions = [name for (name, type_) in 
missing_functions if type_.__name__ == 'unsupported_function'] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)): getattr(kdf.set_index('a').index, name)() deprecated_functions = [name for (name, type_) in missing_functions if type_.__name__ == 'deprecated_function'] for name in deprecated_functions: with self.assertRaisesRegex(PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)): getattr(kdf.set_index('a').index, name)() # MultiIndex functions missing_functions = inspect.getmembers(_MissingPandasLikeMultiIndex, inspect.isfunction) unsupported_functions = [name for (name, type_) in missing_functions if type_.__name__ == 'unsupported_function'] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)): getattr(kdf.set_index(['a', 'b']).index, name)() deprecated_functions = [name for (name, type_) in missing_functions if type_.__name__ == 'deprecated_function'] for name in deprecated_functions: with self.assertRaisesRegex(PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)): getattr(kdf.set_index(['a', 'b']).index, name)() # Index properties missing_properties = inspect.getmembers(_MissingPandasLikeIndex, lambda o: isinstance(o, property)) unsupported_properties = [name for (name, type_) in missing_properties if type_.fget.__name__ == 'unsupported_property'] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)): getattr(kdf.set_index('a').index, name) deprecated_properties = [name for (name, type_) in missing_properties if type_.fget.__name__ == 'deprecated_property'] for name in deprecated_properties: with self.assertRaisesRegex(PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)): getattr(kdf.set_index('a').index, name) # MultiIndex properties missing_properties = inspect.getmembers(_MissingPandasLikeMultiIndex, lambda o: isinstance(o, property)) unsupported_properties = [name for (name, type_) in missing_properties if type_.fget.__name__ == 'unsupported_property'] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Index.*{}.*not implemented( yet\\.|\\. 
.+)".format(name)): getattr(kdf.set_index(['a', 'b']).index, name) deprecated_properties = [name for (name, type_) in missing_properties if type_.fget.__name__ == 'deprecated_property'] for name in deprecated_properties: with self.assertRaisesRegex(PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)): getattr(kdf.set_index(['a', 'b']).index, name) def test_index_has_duplicates(self): indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)] names = [None, 'ks', 'ks', None] has_dup = [False, True, True, False] for idx, name, expected in zip(indexes, names, has_dup): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name)) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.has_duplicates, expected) def test_multiindex_has_duplicates(self): indexes = [[list("abc"), list("edf")], [list("aac"), list("edf")], [list("aac"), list("eef")], [[1, 4, 4], [4, 6, 6]]] has_dup = [False, False, True, True] for idx, expected in zip(indexes, has_dup): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.has_duplicates, expected) def test_multi_index_not_supported(self): kdf = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}) with self.assertRaisesRegex(TypeError, "cannot perform any with this index type"): kdf.set_index(['a', 'b']).index.any() with self.assertRaisesRegex(TypeError, "cannot perform all with this index type"): kdf.set_index(['a', 'b']).index.all() def test_index_nlevels(self): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.nlevels, 1) def test_multiindex_nlevel(self): pdf = pd.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')]) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.nlevels, 2) def test_multiindex_from_arrays(self): arrays = [['a', 'a', 'b', 'b'], ['red', 'blue', 'red', 'blue']] pidx = pd.MultiIndex.from_arrays(arrays) kidx = ks.MultiIndex.from_arrays(arrays) self.assert_eq(pidx, kidx) def test_multiindex_swaplevel(self): pidx =
pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]])
pandas.MultiIndex.from_arrays
from .disc_learning import NoiseAwareModel from .utils import MentionScorer import numbskull from numbskull import NumbSkull from numbskull.inference import FACTORS from numbskull.numbskulltypes import Weight, Variable, Factor, FactorToVar import numpy as np import random import scipy.sparse as sparse from copy import copy from pandas import DataFrame, Series from distutils.version import StrictVersion DEP_SIMILAR = 0 DEP_FIXING = 1 DEP_REINFORCING = 2 DEP_EXCLUSIVE = 3 class FeaturizedGenerativeModelWeights(object): def __init__(self, n): self.n = n self.class_prior = 0.0 self.lf_accuracy = np.zeros(n, dtype=np.float64) for optional_name in FeaturizedGenerativeModel.optional_names: setattr(self, optional_name, np.zeros(n, dtype=np.float64)) for dep_name in FeaturizedGenerativeModel.dep_names: setattr(self, dep_name, sparse.lil_matrix((n, n), dtype=np.float64)) def is_sign_sparsistent(self, other, threshold=0.1): if self.n != other.n: raise ValueError("Dimension mismatch. %d versus %d" % (self.n, other.n)) if not self._weight_is_sign_sparsitent(self.class_prior, other.class_prior, threshold): return False for i in range(self.n): if not self._weight_is_sign_sparsitent( self.lf_accuracy[i], other.lf_accuracy[i], threshold): return False for name in FeaturizedGenerativeModel.optional_names: for i in range(self.n): if not self._weight_is_sign_sparsitent( getattr(self, name)[i], getattr(other, name)[i], threshold): return False for name in FeaturizedGenerativeModel.dep_names: for i in range(self.n): for j in range(self.n): if not self._weight_is_sign_sparsitent( getattr(self, name)[i, j], getattr(other, name)[i, j], threshold): return False return True def _weight_is_sign_sparsitent(self, w1, w2, threshold): if abs(w1) <= threshold and abs(w2) <= threshold: return True elif w1 > threshold and w2 > threshold: return True elif w1 < -1 * threshold and w2 < -1 * threshold: return True else: return False class FeaturizedGenerativeModel(object): """ A generative model for data programming for binary classification. Supports featurized labeling functions. Supports dependencies among labeling functions. :param class_prior: whether to include class label prior factors :param lf_prior: whether to include labeling function prior factors :param lf_features: whether to incclude labeling function feature factors :param lf_propensity: whether to include labeling function propensity factors :param lf_class_propensity: whether to include class-specific labeling function propensity factors :param seed: seed for initializing state of Numbskull variables """ def __init__(self, class_prior=False, lf_prior=False, lf_features=False, lf_propensity=False, lf_class_propensity=False, seed=271828): numbskull_version = numbskull.__version__ numbskull_require = "0.1" if StrictVersion(numbskull_version) < StrictVersion(numbskull_require): raise ValueError( "Snorkel requires Numbskull version %s, but version %s is installed." % (numbskull_require, numbskull_version)) self.class_prior = class_prior self.lf_prior = lf_prior self.lf_features = lf_features self.lf_propensity = lf_propensity self.lf_class_propensity = lf_class_propensity self.weights = None self.rng = random.Random() self.rng.seed(seed) # These names of factor types are for the convenience of several methods # that perform the same operations over multiple types, but this class's # behavior is not fully specified here. Other methods, such as marginals(), # as well as maps defined within methods, require manual adjustments to # implement changes. 
# # These names are also used by other related classes, such as # GenerativeModelParameters optional_names = ('lf_prior', 'lf_propensity', 'lf_class_propensity') dep_names = ( 'dep_similar', 'dep_fixing', 'dep_reinforcing', 'dep_exclusive' ) def train(self, L, deps=(), LF_acc_priors=None, LF_acc_features=None, LF_acc_prior_default=0.7, labels=None, label_prior=0.99, init_deps=0.0, init_class_prior=-1.0, epochs=30, step_size=None, decay=1.0, reg_param=0.1, reg_type=2, verbose=False, truncation=10, burn_in=5, cardinality=None, timer=None): """ Fits the parameters of the model to a data set. By default, learns a conditionally independent model with featurized accuracies. Additional unary dependencies can be set to be included in the constructor. Additional pairwise and higher-order dependencies can be included as an argument. Results are stored as a member named weights, instance of snorkel.learning.gen_learning.GenerativeModelWeights. :param L: M x N csr_AnnotationMatrix-type label matrix, where there are M candidates labeled by N labeling functions (LFs) :param deps: collection of dependencies to include in the model, each element is a tuple of the form (LF 1 index, LF 2 index, dependency type), see snorkel.learning.constants :param LF_acc_priors: An N-element list of prior probabilities for the LF accuracies :param LF_acc_features: An N-element list of features that determine the accuracy of its labeling function; its labeling function has a single feature; feature weights are coupled :param LF_acc_prior_default: Default prior probability for each LF accuracy; if LF_acc_priors is unset, each LF will have this prior :param labels: Optional ground truth labels :param label_prior: The prior probability that the ground truth labels (if provided) are correct :param init_deps: initial weight for additional dependencies, except class prior (in log scale) :param init_class_prior: initial class prior (in log scale), note only used if class_prior=True in constructor :param epochs: number of training epochs :param step_size: gradient step size, default is 1 / L.shape[0] :param decay: multiplicative decay of step size, step_size_(t+1) = step_size_(t) * decay :param reg_param: regularization strength :param reg_type: 1 = L1 regularization, 2 = L2 regularization :param verbose: whether to write debugging info to stdout :param truncation: number of iterations between truncation step for L1 regularization :param burn_in: number of burn-in samples to take before beginning learning :param cardinality: number of possible classes; by default is inferred from the label matrix L :param timer: stopwatch for profiling, must implement start() and end() """ m, n = L.shape step_size = step_size or 0.0001 reg_param_scaled = reg_param / L.shape[0] # Automatically infer cardinality # Binary: Values in {-1, 0, 1} [Default] # Categorical: Values in {0, 1, ..., K} if cardinality is None: # This is just an annoying hack for LIL sparse matrices... try: lmax = L.max() except AttributeError: lmax = L.tocoo().max() if lmax > 2: cardinality = lmax elif lmax < 2: cardinality = 2 else: raise ValueError( "L.max() == %s, cannot infer cardinality." % lmax) print("Inferred cardinality: %s" % cardinality) # Priors for LFs default to fixed prior value # NOTE: Setting default != 0.5 creates a (fixed) factor which increases # runtime (by ~0.5x that of a non-fixed factor)... 
if LF_acc_priors is None: LF_acc_priors = [LF_acc_prior_default for _ in range(n)] else: LF_acc_priors = list(copy(LF_acc_priors)) if LF_acc_features is None: LF_acc_features = [str(i) for i in range(n)] else: LF_acc_features = list(copy(LF_acc_features)) # LF weights are un-fixed is_fixed = [False for _ in range(n)] # If supervised labels are provided, add them as a fixed LF with prior # Note: For large L this column stack operation could be very # inefficient, can consider refactoring... if labels is not None: labels = labels.reshape(m, 1) L = sparse.hstack([L.copy(), labels]) is_fixed.append(True) LF_acc_priors.append(label_prior) n += 1 # Shuffle the data points idxs = range(m) np.random.shuffle(idxs) if not isinstance(L, sparse.csr_matrix): L = sparse.csr_matrix(L) L = L[idxs, :] # Compile factor graph self._process_dependency_graph(L, deps) weight, variable, factor, ftv, domain_mask, n_edges, feature2WoffMap =\ self._compile(L, init_deps, init_class_prior, LF_acc_priors, LF_acc_features, is_fixed, cardinality) fg = NumbSkull( n_inference_epoch=0, n_learning_epoch=epochs, stepsize=step_size, decay=decay, reg_param=reg_param_scaled, regularization=reg_type, truncation=truncation, quiet=(not verbose), verbose=verbose, learn_non_evidence=True, burn_in=burn_in ) fg.loadFactorGraph(weight, variable, factor, ftv, domain_mask, n_edges) if timer is not None: timer.start() fg.learning(out=False) if timer is not None: timer.end() self._process_learned_weights(L, fg, LF_acc_priors, LF_acc_features, feature2WoffMap, is_fixed) # Store info from factor graph weight, variable, factor, ftv, domain_mask, n_edges, feature2WoffMap =\ self._compile(sparse.coo_matrix((1, n), L.dtype), init_deps, init_class_prior, LF_acc_priors, LF_acc_features, is_fixed, cardinality) variable["isEvidence"] = False weight["isFixed"] = True weight["initialValue"] = fg.factorGraphs[0].weight_value fg.factorGraphs = [] fg.loadFactorGraph(weight, variable, factor, ftv, domain_mask, n_edges) self.fg = fg self.nlf = n self.cardinality = cardinality def learned_lf_stats(self): """ Provides a summary of what the model has learned about the labeling functions. For each labeling function, estimates of the following are provided: Abstain Accuracy Coverage [Following are only available for binary tasks] True Positive (TP) False Positive (FP) True Negative (TN) False Negative (FN) WARNING: This uses Gibbs sampling to estimate these values. This will tend to mix poorly when there are many very accurate labeling functions. In this case, this function will assume that the classes are approximately balanced. 
""" if self.fg is None: raise ValueError( "Must fit model with train() before computing diagnostics.") burnin = 500 trials = 5000 cardinality = self.cardinality count = np.zeros((self.nlf, self.cardinality, cardinality + 1)) for true_label in range(cardinality): self.fg.factorGraphs[0].var_value[0, 0] = true_label self.fg.factorGraphs[0].inference(burnin, 0, True) for i in range(trials): self.fg.factorGraphs[0].inference(0, 1, True) for j in range(self.nlf): y = self.fg.factorGraphs[0].var_value[0, 0] lf = self.fg.factorGraphs[0].var_value[0, j + 1] count[j, y, lf] += 1 count /= cardinality * trials # Compute summary stats to return to user stats = [] for i in range(self.nlf): if cardinality == 2: tp = count[i, 1, 1] fp = count[i, 0, 1] tn = count[i, 0, 0] fn = count[i, 1, 0] coverage = 1 - (count[i, 0, 2] + count[i, 1, 2]) stats.append({ "Precision": tp / (tp + fp), "Recall": tp / count[i, 1, :].sum(), "Accuracy": (tp + tn) / coverage, "Coverage": coverage }) else: correct = sum([count[i, j, j] for j in range(cardinality)]) coverage = 1 - sum([count[i, j, cardinality] for j in range(cardinality)]) stats.append({ "Accuracy": correct / coverage, "Coverage": coverage }) return
DataFrame(stats)
pandas.DataFrame
import pandas as pd import genesysmod_to_iamc.pyam_aggregator as pa from genesysmod_to_iamc._statics import * class DataWrapper(object): def __init__(self, input_file, output_gdx): self._output_gdx = output_gdx self.usage_values = pd.DataFrame() self.production_values = pd.DataFrame() self.emission_values = pd.DataFrame() self.capacity_values =
pd.DataFrame()
pandas.DataFrame
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from collections import defaultdict df =
pd.read_csv('quast_0kb_cosags/report.tsv', sep='\t', index_col=0)
pandas.read_csv
from dlra.algorithms import dlra_parafac, dlra_mf, dlra_mf_bcd, dlra_mf_iht from dlra.utils import sam from mscode.utils.utils import count_support, redundance_count from mscode.utils.generator import gen_mix, initialize from mscode.methods.algorithms import ista, omp from mscode.methods.proxs import HardT #import tensorly as tl from matplotlib import pyplot as plt import pandas as pd import numpy as np import plotly.express as px import scipy.io from dlra.xp.genDCT import genDCT import copy # Seeding np.random.seed(seed=0) # Loading the data # root at this file dictio = scipy.io.loadmat('../../data/XP_completion/Urban.mat') # dict is a python dictionnary. It contains the matrix we want to NMF Yall = dictio['A'] # Extracting a 20x20 patch n = 20 m = 162 HSI = np.transpose(np.reshape(Yall, [307, 307, m]),[1,0,2]) Sliced_HSI = HSI[70:70+n,100:100+n,:] #plt.imshow(Sliced_HSI[:,:,10]) #plt.show() Y = np.reshape(Sliced_HSI,[n*n, m]) #Y = Y/np.linalg.norm(Y) verbose = 0 # Building the 2DCT dictionary D = genDCT([n,n], 1) # model parameters k = 50 r = 4 lamb = 5e-3 # 5e-3 # DataFrame to store results store_pd =
pd.DataFrame(columns=["value", "error type", "sparsity", "algorithm"])
pandas.DataFrame
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe import logging from typing import Dict, List, Type, Tuple import kats.utils.time_series_parameter_tuning as tpt import numpy as np import pandas as pd from kats.detectors import changepoint_evaluator from kats.detectors import cusum_model from kats.detectors.changepoint_evaluator import TuringEvaluator from kats.detectors.detector import DetectorModel from kats.detectors.detector_benchmark import ( decompose_params, DetectorModelSet, SUPPORTED_METRICS, ) from kats.utils.time_series_parameter_tuning import TimeSeriesParameterTuning from pymoo.factory import get_algorithm, get_crossover, get_mutation, get_sampling from pymoo.model.problem import Problem from pymoo.model.result import Result from pymoo.optimize import minimize MINIMIZE = "minimize" MAXIMIZE = "maximize" OPTIMIZATION_GOAL_OPTIONS = {MINIMIZE, MAXIMIZE} class HPT_Problem(Problem): """ Multi-objective hyper parameter tuning problem. You can specify the objectives that you want optimize from the list of SUPPORTED_METRICS. For each objective you need to provide optimization goal (minimize or maximize). For example, if you want to minimize delay and maximize F1-score you could provide objectives_and_goals = {"f_score": "maximize", "delay": "minimize"}. You can also provide more than two objectives if you like. """ def __init__( self, search_grid: TimeSeriesParameterTuning, data_df: pd.DataFrame, objectives_and_goals: Dict[str, str], ): self._validate_objectives_and_goals(objectives_and_goals) self.objectives_and_goals = objectives_and_goals # Make a list so that we always calculate fitness objectives in deterministic order. self.objectives = list(objectives_and_goals.keys()) tunable_parameters = search_grid.get_search_space().tunable_parameters self.par_to_val = {} for par in tunable_parameters: self.par_to_val[par] = tunable_parameters[par].values # Make a list of the keys (tunable parameters) so that the order is deterministic. self.tunable_parameters = list(tunable_parameters.keys()) self.lower_limits, self.upper_limits = self.get_upper_and_lower_limits() self.n_vars = len(tunable_parameters) self.all_solutions = {} self.data_df = data_df super().__init__( n_var=self.n_vars, n_obj=len(self.objectives), n_constr=0, # Currently no constraints for the fitness objectives. xl=np.array(self.lower_limits), xu=np.array(self.upper_limits), # We solve an integer problem where each integer maps to hyper parameter. type_var=int, elementwise_evaluation=True, ) self.turing_model = changepoint_evaluator.TuringEvaluator( is_detector_model=True, detector=cusum_model.CUSUMDetectorModel ) def _validate_objectives_and_goals(self, objectives_and_goals: Dict[str, str]): self._check_if_all_valid( values_to_check=list(objectives_and_goals.keys()), expected_values=SUPPORTED_METRICS, explanation="Objectives", ) self._check_if_all_valid( values_to_check=list(objectives_and_goals.values()), expected_values=OPTIMIZATION_GOAL_OPTIONS, explanation="Optimization goal", ) def _check_if_all_valid( self, values_to_check: List[str], expected_values: set, explanation: str ): if not all( [value_to_check in expected_values for value_to_check in values_to_check] ): raise Exception( f"{explanation} must be listed in {expected_values}. You provided {values_to_check}." 
) def _evaluate(self, x: np.ndarray, out: np.ndarray, *args, **kwargs): out["F"] = self.get_fitness(x) def get_fitness(self, x: np.ndarray): pars = self.decode_solution(x) params_model, threshold_low, threshold_high = decompose_params(pars) results = self.turing_model.evaluate( data=self.data_df, model_params=params_model, threshold_low=threshold_low, threshold_high=threshold_high, ) self.all_solutions[self.get_unique_id_for_solution(x)] = results fitness = [0] * self.n_obj averaged_results = np.mean(results) for i in range(self.n_obj): # For maximization problem, multiply the result by -1. fitness[i] = ( averaged_results[self.objectives[i]] if self.objectives_and_goals[self.objectives[i]] == MINIMIZE else -averaged_results[self.objectives[i]] ) return fitness def get_upper_and_lower_limits(self): upper_limits = [] """ We assign the limits in the order of tunable_parameters list. The order of that list will not change which is very important so that we can match the solution vector back to tunable parameters. """ for key in self.par_to_val: upper_limits.append(len(self.par_to_val[key]) - 1) # All tunable_parameters should have at least one option. lower_limits = [0] * len(self.par_to_val) return lower_limits, upper_limits def decode_solution(self, x: np.ndarray) -> Dict[str, float]: pars = {} i = 0 for key in self.tunable_parameters: pars[key] = self.par_to_val[key][x[i]] i += 1 return pars def get_unique_id_for_solution(self, x: np.ndarray) -> str: return ",".join([str(x_component) for x_component in x]) class MultiObjectiveModelOptimizer(DetectorModelSet): def __init__( self, model_name: str, model: Type[DetectorModel], parameters_space: List[Dict], data_df: pd.DataFrame, n_gen: int, pop_size: int, objectives_and_goals: Dict[str, str], ): super().__init__(model_name, model) self.model_name = model_name self.model = model self.result = {} self.solutions =
pd.DataFrame()
pandas.DataFrame
import os import numpy as np import pytest from pandas.compat import is_platform_little_endian import pandas as pd from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf from pandas.tests.io.pytables.common import ( _maybe_remove, ensure_clean_path, ensure_clean_store, tables, ) from pandas.io import pytables as pytables from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term pytestmark = pytest.mark.single def test_mode(setup_path): df = tm.makeTimeDataFrame() def check(mode): msg = r"[\S]* does not exist" with ensure_clean_path(setup_path) as path: # constructor if mode in ["r", "r+"]: with pytest.raises(IOError, match=msg): HDFStore(path, mode=mode) else: store = HDFStore(path, mode=mode) assert store._handle.mode == mode store.close() with ensure_clean_path(setup_path) as path: # context if mode in ["r", "r+"]: with pytest.raises(IOError, match=msg): with HDFStore(path, mode=mode) as store: pass else: with HDFStore(path, mode=mode) as store: assert store._handle.mode == mode with ensure_clean_path(setup_path) as path: # conv write if mode in ["r", "r+"]: with pytest.raises(IOError, match=msg): df.to_hdf(path, "df", mode=mode) df.to_hdf(path, "df", mode="w") else: df.to_hdf(path, "df", mode=mode) # conv read if mode in ["w"]: msg = ( "mode w is not allowed while performing a read. " r"Allowed modes are r, r\+ and a." ) with pytest.raises(ValueError, match=msg): read_hdf(path, "df", mode=mode) else: result = read_hdf(path, "df", mode=mode) tm.assert_frame_equal(result, df) def check_default_mode(): # read_hdf uses default mode with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", mode="w") result = read_hdf(path, "df") tm.assert_frame_equal(result, df) check("r") check("r+") check("a") check("w") check_default_mode() def test_reopen_handle(setup_path): with ensure_clean_path(setup_path) as path: store = HDFStore(path, mode="a") store["a"] = tm.makeTimeSeries() msg = ( r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the " "current file!" 
) # invalid mode change with pytest.raises(PossibleDataLossError, match=msg): store.open("w") store.close() assert not store.is_open # truncation ok here store.open("w") assert store.is_open assert len(store) == 0 store.close() assert not store.is_open store = HDFStore(path, mode="a") store["a"] = tm.makeTimeSeries() # reopen as read store.open("r") assert store.is_open assert len(store) == 1 assert store._mode == "r" store.close() assert not store.is_open # reopen as append store.open("a") assert store.is_open assert len(store) == 1 assert store._mode == "a" store.close() assert not store.is_open # reopen as append (again) store.open("a") assert store.is_open assert len(store) == 1 assert store._mode == "a" store.close() assert not store.is_open def test_open_args(setup_path): with tm.ensure_clean(setup_path) as path: df = tm.makeDataFrame() # create an in memory store store = HDFStore( path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0 ) store["df"] = df store.append("df2", df) tm.assert_frame_equal(store["df"], df) tm.assert_frame_equal(store["df2"], df) store.close() # the file should not have actually been written assert not os.path.exists(path) def test_flush(setup_path): with ensure_clean_store(setup_path) as store: store["a"] = tm.makeTimeSeries() store.flush() store.flush(fsync=True) def test_complibs_default_settings(setup_path): # GH15943 df = tm.makeDataFrame() # Set complevel and check if complib is automatically set to # default value with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df", complevel=9) result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with tables.open_file(tmpfile, mode="r") as h5file: for node in h5file.walk_nodes(where="/df", classname="Leaf"): assert node.filters.complevel == 9 assert node.filters.complib == "zlib" # Set complib and check to see if compression is disabled with ensure_clean_path(setup_path) as tmpfile: df.to_hdf(tmpfile, "df", complib="zlib") result = pd.read_hdf(tmpfile, "df") tm.assert_frame_equal(result, df) with
tables.open_file(tmpfile, mode="r")
pandas.tests.io.pytables.common.tables.open_file
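# The tests above exercise HDFStore open modes: "r"/"r+" require an existing
# file, "w" truncates, and "a" appends; read_hdf defaults to mode "r". A minimal
# sketch of the same semantics, assuming PyTables is installed and that the
# path "example.h5" (illustrative only) is writable:
import pandas as pd

df = pd.DataFrame({"x": range(5)})
df.to_hdf("example.h5", key="df", mode="w")       # create / truncate the file
df.to_hdf("example.h5", key="df2", mode="a")      # add a second key to the same file
round_trip = pd.read_hdf("example.h5", key="df")  # default read mode is "r"
assert round_trip.equals(df)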
#!/usr/bin/env python # coding: utf-8 #Project name: netCDF --> Excel #Description: It receives a netCDF file with a predefined format and returns the equivalent file in excel format. #Programmers: <NAME> #Date: 07-09-2020 #Loading the packages import pandas as pd import xarray as xr import numpy as np import scipy print("Enter the exact name of the netCDF file. Please do include the file extension (e.g., .nc):") netCDF_file_name = input() ds_disk = xr.open_dataset("./data/input/"+netCDF_file_name) #Fetching the global attributes ds_global_attributes = ds_disk.attrs #Fetching the indexes (It will not be used later on as the indeces are also available in the variables) ds_indeces = list(ds_disk.coords.indexes["SAMPLE NAME"]) #Fetching the variables ds_disk_variables = ds_disk.to_dataframe().reset_index() #Fetching the column names column_names = list(ds_disk_variables.columns) # Retrieve the columns order orders = ds_disk_variables.iloc[len(ds_disk_variables)-1].to_dict() # Remove the columns order ds_disk_variables = ds_disk_variables.drop(axis=0, index=len(ds_disk_variables)-1) # ds_disk_variables = ds_disk_variables.drop(axis=0, index=len(ds_disk_variables)) # Extraction of the order and place them in two arrays column_dict = {"left":{}, "right":{}} for i,j in orders.items(): info = str(j).split(".") if j == "flag": side, order = 1 , 0 column_dict["left"][i] = "1.0" else: side, order = info[0] , info[1] if side == "1": column_dict["left"][i] = j elif side == "2": column_dict["right"][i] = j left_dict = {k: v for k, v in sorted(column_dict["left"].items(), key=lambda item: item[1])} right_dict = {k: v for k, v in sorted(column_dict["right"].items(), key=lambda item: item[1])} left = [i for i in left_dict] right = [i for i in right_dict] # Reordering the columns ds_disk_variables = ds_disk_variables[left+right] #Creating an empty dictionary where the keys are the column names, and the values refer to the properties of those columns ds_dict = {i:[] for i in left+right} for j in left: ds_dict[j] = {} ds_dict[j]["comment"] = "" for j in right: ds_dict[j] = {} ds_dict[j]["units"] = "" ds_dict[j]["comment"] = "" #Populating the dictionary for i in left: if i == "SAMPLE NAME": #Is this property already stored in the NetCDF file? ds_dict[i]["comment"] = "must match a sample on the SAMPLES tab column A" else: ds_dict[i]["comment"] = ds_disk.data_vars[i].attrs["comment"] for j in right: ds_dict[j]["units"] = ds_disk.data_vars[j].attrs["units"] ds_dict[j]["comment"] = ds_disk.data_vars[j].attrs["comment"] #Creating a dataframe for the left side of the tables's header s =
pd.DataFrame(columns=ds_disk_variables.columns)
pandas.DataFrame
import operator import numpy as np import pytest import pytz from pandas._libs.tslibs import IncompatibleFrequency import pandas as pd from pandas import Series, date_range import pandas._testing as tm def _permute(obj): return obj.take(np.random.permutation(len(obj))) class TestSeriesFlexArithmetic: @pytest.mark.parametrize( "ts", [ (lambda x: x, lambda x: x * 2, False), (lambda x: x, lambda x: x[::2], False), (lambda x: x, lambda x: 5, True), (lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True), ], ) @pytest.mark.parametrize( "opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"] ) def test_flex_method_equivalence(self, opname, ts): # check that Series.{opname} behaves like Series.__{opname}__, tser =
tm.makeTimeSeries()
pandas._testing.makeTimeSeries
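# The parametrized test above asserts that the flex methods Series.add, .sub,
# .mul, ... behave like the corresponding operators. A two-line illustration of
# that equivalence:
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])
assert s.add(2).equals(s + 2)
assert s.pow(2).equals(s ** 2)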
# -*- coding: utf-8 -*- """ Created on Fri Jul 23 11:04:11 2021 @author: IvanTower """ import numpy as np import pandas as pd from pathlib import Path import os from statsmodels.tsa.stattools import adfuller, grangercausalitytests def adf_test(df): """ Augmented Dicky-Fuller test Test if the time series is stationary or not. If the p-value is below 0.05 we consider it stationary """ result = adfuller(df.values) print('ADF Statistics: %f' % result[0]) print('p-value: %f' % result[1]) print('Critical values:') for key, value in result[4].items(): print('\t%s: %.3f' % (key, value)) def grangers_causation_matrix(data, variables, test='ssr_chi2test', verbose=False): """Check Granger Causality of all possible combinations of the Time series. The rows are the response variable, columns are predictors. The values in the table are the P-Values. P-Values lesser than the significance level (0.05), implies the Null Hypothesis that the coefficients of the corresponding past values is zero, that is, the X does not cause Y can be rejected. data : pandas dataframe containing the time series variables variables : list containing names of the time series variables. """ maxlag=12 df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables) for c in df.columns: for r in df.index: test_result = grangercausalitytests(data[[r, c]], maxlag=maxlag, verbose=False) p_values = [round(test_result[i+1][0][test][1],4) for i in range(maxlag)] if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}') min_p_value = np.min(p_values) df.loc[r, c] = min_p_value df.columns = [var for var in variables] df.index = [var + '_y' for var in variables] return df def calculate_linear_granger(model_metric = "MSE", save = True): """ Calculate the linear Granger Causality for the tested metric The model_metric can be either "MSE" for the autoencoders and anomaly detectors or "f1_score" for the object detectors First all the tested columns are run through an augmented Dicky-Fuller test, which yields that not all of them are stationary A first order differenciation is done for all columns and they are again run through the test, which this time all are stationary Finally the linear Granger Causality test is performed with only the causality relationship between the tested metric and the columns is saved """ if model_metric == "MSE": augmented_results = 'augmented_mse.csv' else: augmented_results = 'augmented_f1_score.csv' main_path = Path(os.path.dirname(__file__)).parent metadata_path = r"visualize_results" df = pd.read_csv(os.path.join(main_path, metadata_path, augmented_results)) tested_columns = ["Temperature", "Humidity", "Activity", "Day_Night", model_metric] cleanup_nums = {"Day_Night": {"day": 0, "night": 1}} df = df.replace(cleanup_nums) for model in df["model"].unique(): df_small = df[df["model"]== model].copy() df_small = df_small[tested_columns] print(f"------------ MODEL {model} ----------------") for column in tested_columns: print(f'ADF Test: {column} time series') adf_test(df_small[column]) print("TRANSFORM DATA -------------") for model in df["model"].unique(): df_small = df[df["model"]== model].copy() df_small = df_small[tested_columns] df_small_transformed = df_small.diff().dropna() print(f"------------ MODEL {model} ----------------") for column in tested_columns: print(f'ADF Test: {column} time series') adf_test(df_small_transformed[column]) print("LINEAR GRANGER CAUSALITY TEST ---------------") all_models = [] for model in df["model"].unique(): df_small = df[df["model"]== 
model].copy() df_small = df_small[tested_columns] df_small_transformed = df_small.diff().dropna() print(f"------------ MODEL {model} ----------------") granger_df = grangers_causation_matrix(df_small_transformed, variables = df_small_transformed.columns) output_statistic = granger_df.loc[[f"{model_metric}_y"], granger_df.columns != model_metric].squeeze().sort_values(ascending=True).to_frame().T output_statistic = output_statistic.rename(index={f"{model_metric}_y": model}) print(output_statistic) all_models.append(output_statistic) all_models_df =
pd.concat(all_models)
pandas.concat
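# The docstrings above describe the workflow: test each series for stationarity
# with the augmented Dickey-Fuller test, difference once if needed, then run
# pairwise Granger causality tests and keep the minimum p-value over the lags.
# A minimal sketch of that workflow on synthetic data (the names "x"/"y" and
# maxlag=4 are illustrative, not taken from the original script):
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import adfuller, grangercausalitytests

rng = np.random.default_rng(0)
x = rng.normal(size=200).cumsum()                    # non-stationary random walk
y = np.r_[0.0, x[:-1]] + rng.normal(size=200)        # y follows x with a one-step lag
df = pd.DataFrame({"x": x, "y": y}).diff().dropna()  # first-order differencing

print("ADF p-value for diff(x):", adfuller(df["x"].values)[1])
res = grangercausalitytests(df[["y", "x"]], maxlag=4, verbose=False)
p_values = [round(res[lag][0]["ssr_chi2test"][1], 4) for lag in range(1, 5)]
print("min Granger p-value (x -> y):", min(p_values))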
#%%
import matplotlib.pyplot as plt
import matplotlib as mplpy
import seaborn as sns
import pandas as pd
import numpy as np

# Ignore warnings
import warnings
warnings.filterwarnings("ignore")

# Import the testing and training datasets
train = pd.read_csv('D:\\My Projects\\Python Projects\\house\\train.csv')
test = pd.read_csv('D:\\My Projects\\Python Projects\\house\\test.csv')

# Show the number of rows and columns
train.shape

# Show all the columns of our dataset
train.columns

# Get the statistical summary of house prices
train['SalePrice'].describe()

# Select only the columns that have numerical values
train_corr = train.select_dtypes(include=[np.number])

# Delete the Id column (it is not useful for determining correlation between features and is noisy)
del train_corr['Id']

# Heatmap showing the correlation between all numerical values
corr = train_corr.corr()
plt.subplots(figsize=(27, 12))
sns.heatmap(corr, annot=True)

# Features whose absolute correlation with SalePrice exceeds 0.6
top_feature = corr.index[abs(corr['SalePrice']) > 0.6]
plt.subplots(figsize=(16, 10))
top_corr = train[top_feature].corr()
sns.heatmap(top_corr, annot=True)
plt.show()

# Listing the most correlated features
most_corr =
pd.DataFrame(top_feature)
pandas.DataFrame
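# The script above keeps only the features whose absolute correlation with
# SalePrice exceeds 0.6. A small self-contained sketch of that selection
# pattern on toy data (the column names are made up):
import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
toy = pd.DataFrame({"a": rng.normal(size=100)})
toy["b"] = toy["a"] * 2 + rng.normal(scale=0.1, size=100)  # strongly correlated with "a"
toy["c"] = rng.normal(size=100)                            # unrelated noise
corr = toy.corr()
strong = corr.index[abs(corr["b"]) > 0.6]
print(list(strong))  # expected to contain 'a' and 'b', and usually not 'c'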
import requests import re import ipaddress import pandas as pd import openpyxl from tkinter import * from tkinter import filedialog import tkinter.messagebox import os import configparser from openpyxl.styles import Border, Side from openpyxl.formatting.rule import ColorScaleRule, FormulaRule config = configparser.ConfigParser() config.read(r"API_KEY.ini") api_key = config.get('Auth', 'API_KEY') filepath = None GUI = Tk() def checkip(IP): if ipaddress.ip_address(IP).is_private is False: abipdbheaders = { 'Key': api_key, 'Accept': 'application/json', } abipdbparams = { 'maxAgeInDays': 1, 'ipAddress': IP, } req = requests.get("https://api.abuseipdb.com/api/v2/check", headers = abipdbheaders, params = abipdbparams) resp = req.json() if 'errors' not in resp: return resp["data"] else: exit() else: return (f"{IP} is private") def filterip(ipin) : ipregex = re.compile(r'(?:\d{1,3}\.)+(?:\d{1,3})') ipa = re.search(ipregex, ipin) return ipa.group(0) def checkipfromfile(infile): iplist = [] output = [] f1 = open(infile, 'r') tmp = f1.readlines() for i in tmp: if i == '' or i == " " or i == "198.19.250.1" or i == "\n": pass else: iplist.append(filterip(i)) for i in iplist: output.append(checkip(i)) f1.close() return output def checkipfrominput(in1): iplist2 = [] output2 = [] tmp2 = in1.split('\n') for i in tmp2: if i == '' or i == " " or i == "198.19.250.1" or i == "\n": pass else: iplist2.append(filterip(i)) for i in iplist2: output2.append(checkip(i)) return output2 def get_report(input): concdict = { k: [d.get(k) for d in input if k in d] for k in set().union(*input) } IpaddList = concdict.get("ipAddress") AbusescoreList = concdict.get("abuseConfidenceScore") PublicList = concdict.get("isPublic") IpverList = concdict.get("ipVersion") IswlList = concdict.get("isWhitelisted") CountrycList = concdict.get("countryCode") UsageList = concdict.get("usageType") IspList = concdict.get("isp") DomainList = concdict.get("domain") TotalreportsList = concdict.get("totalReports") LastreportList = concdict.get("lastReportedAt") wb = openpyxl.Workbook() ws = wb.active ws['A1'] = 'ipAddress' ws['B1'] = 'abuseConfidenceScore' ws['C1'] = 'isPublic' ws['D1'] = 'ipVersion' ws['E1'] = 'isWhitelisted' ws['F1'] = 'countryCode' ws['G1'] = 'usageType' ws['H1'] = 'isp' ws['I1'] = 'domain' ws['J1'] = 'totalReports' ws['K1'] = 'lastReportedAt' border_style = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) clrrule = ColorScaleRule(start_type= 'num', start_value='0',start_color='00B050', mid_type= 'num', mid_value='25', mid_color='FCA904', end_type='num', end_value='100', end_color='CC0000') ws.conditional_formatting.add('B2:B500', clrrule) ws.conditional_formatting.add('A1:K500', FormulaRule(formula=['NOT(ISBLANK(A1))'], stopIfTrue=False, border=border_style)) dataframeIpaddList = pd.DataFrame({'ipAddress': IpaddList}) for index, row in dataframeIpaddList.iterrows(): cell = 'A%d' % (index + 2) ws[cell] = row[0] dataframeAbusescoreList = pd.DataFrame({'abuseConfidenceScore': AbusescoreList}) for index, row in dataframeAbusescoreList.iterrows(): cell = 'B%d' % (index + 2) ws[cell] = row[0] dataframePublicList = pd.DataFrame({'isPublic': PublicList}) for index, row in dataframePublicList.iterrows(): cell = 'C%d' % (index + 2) ws[cell] = row[0] dataframeIpverList = pd.DataFrame({'ipVersion': IpverList}) for index, row in dataframeIpverList.iterrows(): cell = 'D%d' % (index + 2) ws[cell] = row[0] dataframeIswlList = pd.DataFrame({'isWhitelisted': IswlList}) for index, row 
in dataframeIswlList.iterrows(): cell = 'E%d' % (index + 2) ws[cell] = row[0] dataframeCountrycList = pd.DataFrame({'countryCode': CountrycList}) for index, row in dataframeCountrycList.iterrows(): cell = 'F%d' % (index + 2) ws[cell] = row[0] dataframeUsageList =
pd.DataFrame({'usageType': UsageList})
pandas.DataFrame
from os import name import sys import random import numpy as np import pandas as pd def init_matrix(names, constraints=None): """Function used to initialize matrix of participants with constraints""" number_of_participants = len(names) if number_of_participants <= 2: print('WARNING: not enough participants') sys.exit(1) matrix = np.ones((len(names), len(names))) np.fill_diagonal(matrix, 0) guests =
pd.DataFrame(matrix, index=names, columns=names)
pandas.DataFrame
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import pandas as pd def Heatmap(data): """Plot the correlation heatmap of a dataset. Parameters ---------- data: DataFrame Dataset to analyse. Returns ------- fig: plot Correlation heatmap of the dataset. """ data =
pd.DataFrame(data)
pandas.DataFrame
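# The docstring above promises a correlation heatmap, but the body is cut off
# after the DataFrame conversion. A common way such a function is completed
# with seaborn (a sketch, not necessarily the original author's code):
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

def correlation_heatmap(data):
    """Return a matplotlib figure with the correlation heatmap of a dataset."""
    data = pd.DataFrame(data)
    fig, ax = plt.subplots(figsize=(8, 6))
    sns.heatmap(data.corr(), annot=True, cmap="coolwarm", ax=ax)
    return fig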
''' .. _picasso.localize: https://picassosr.readthedocs.io/en/latest/localize.html .. _picasso.render: https://picassosr.readthedocs.io/en/latest/render.html .. _spt: https://www.biorxiv.org/content/10.1101/2020.05.17.100354v1 .. _picasso.filter: https://picassosr.readthedocs.io/en/latest/filter.html ''' import pandas as pd import h5py import os import numpy as np import picasso.io as io #%% def load_locs(path): """ Returns localization .hdf5 from picasso as pandas.DataFrame and corresponding .yaml as list of dictionaries. Args: path (str): Path to .hdf5 file as generated by `picasso.localize`_ container (str): Name of dataset within .hdf5 container. Must be set to: * ``'locs'`` for _locs, _render and _picked .hdf5 files as generated by `picasso.render`_. * ``'groups'`` for _pickprops.hdf5 files as generated by `picasso.render`_. * Defaults to ``'locs'``. Returns: tuple: - [0] (pandas.DataFrame): Localizations stored under ``'locs'`` in .hdf5 file - [1] (list): List of dictionaries contained in .yaml corresponding to .hdf5 file """ locs, info = io.load_locs(path) locs =
pd.DataFrame(locs)
pandas.DataFrame
# This script will attempt to detect card fraud with naive Bayes, SVM, and decision trees.
# It can later be done with k-means clustering, AdaBoost and random forests once I've learnt them.
# The data is skewed, so a model could score a high accuracy even if it wasn't detecting fraud,
# simply by detecting the non-fraudulent transactions.
# Therefore use an equal number of fraudulent and non-fraudulent transactions in the test data:
# use 10% of the fraudulent transactions as test data and then an equal number of non-fraudulent transactions.
import pandas
import numpy as np
import random
import Stef_nn as snn
import time
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Load in the data
data =
pandas.read_csv('creditcard.csv')
pandas.read_csv
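# The comments above describe building a balanced test set from a heavily
# imbalanced frame: hold out 10% of the fraudulent rows plus an equal number of
# non-fraudulent rows. A minimal sketch, assuming a 'Class' column where 1
# marks fraud (the layout of the Kaggle creditcard.csv); the random_state is arbitrary.
import pandas as pd

def balanced_test_split(df, label_col="Class", frac=0.1, random_state=0):
    fraud = df[df[label_col] == 1]
    normal = df[df[label_col] == 0]
    fraud_test = fraud.sample(frac=frac, random_state=random_state)
    normal_test = normal.sample(n=len(fraud_test), random_state=random_state)
    test = pd.concat([fraud_test, normal_test])
    train = df.drop(test.index)
    return train, test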
from dataclasses import field, fields import encodings from itertools import count, groupby from multiprocessing import Condition from re import U import pandas as pd import altair as alt import streamlit as st from vega_datasets import data import networkx as nx import matplotlib.pyplot as plt import numpy as np import math as math import time import streamlit as st plt.style.use('seaborn') plt.rcParams['figure.figsize'] = [14,14] from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel from sklearn.cluster import MiniBatchKMeans # @st.cache def load_data(): """ Implement a function to load the datasets, add a column for Platform name, perform data cleaning for country and release year """ df_netflix = pd.read_csv("netflix_titles.csv") df_disney = pd.read_csv("disney_plus_titles.csv") df_hulu = pd.read_csv("hulu_titles.csv") df_netflix['Platform'] = "Netflix" df_disney['Platform'] = "Disney" df_hulu['Platform'] = "Hulu" df = pd.concat([df_netflix,df_disney,df_hulu], ignore_index = True) df.reset_index(inplace=True) df.drop('show_id', axis=1, inplace=True) df.loc[:,'country'] = df.country.str.split(', ') df = df.explode('country') df.drop_duplicates(['title'],keep = "first", inplace = True) df['release_year'] = pd.to_datetime(df['release_year'], format = '%Y') df['date_added'] = pd.to_datetime(df['date_added'], format = '%Y-%m-%d') df['date_added_month'] = df['date_added'].dt.month_name() return df pass def getCountryData(df): """ Implement a function to get counts of data based on show type """ country_df = df[["country","type","index"]] country_df = country_df.groupby(['country','type']).count().unstack() country_df.columns = ['Movie','TV Show'] country_df = country_df.reset_index().fillna(0) country_df = country_df.drop(0, axis = 0) country_df['Movie'] = country_df['Movie'].astype('int') country_df['TV Show'] = country_df['TV Show'].astype('int') country_df['Total_titles'] = country_df['Movie']+country_df['TV Show'] country_df = country_df.sort_values(by = 'Total_titles', ascending = False) return country_df pass def getCountryPlatformData(df): """ Implement a function to get country and total titles data, add numeric codes to implement choropleth maps """ df_codes = pd.read_csv("countries_codes_and_coordinates.csv") df_codes.drop(['Alpha-2 code','Alpha-3 code','Latitude (average)','Longitude (average)'], axis = 1, inplace = True) df_codes['Numeric code'] = df_codes['Numeric code'].str.replace('"','') cp_df = df[["country",'index','Platform']] cp_df = cp_df.groupby(['country','Platform']).count().unstack() cp_df.columns = ['Netflix','Disney','Hulu'] cp_df = cp_df.reset_index().fillna(0) cp_df = cp_df.drop(0, axis = 0) cp_df['Netflix'] = cp_df['Netflix'].astype('int') cp_df['Disney'] = cp_df['Disney'].astype('int') cp_df['Hulu'] = cp_df['Hulu'].astype('int') cp_df['Total_titles'] = cp_df['Netflix']+cp_df['Disney']+cp_df['Hulu'] cp_df = cp_df.sort_values(by = 'Total_titles', ascending = False) temp_df = cp_df.merge(df_codes.rename({'Country':'country'},axis = 1), on = 'country', how = 'left') temp_df = temp_df.fillna("0") temp_df['Numeric code'] = pd.to_numeric(temp_df['Numeric code']) return temp_df pass def getGenreData(df): genre_df = df[["listed_in","index"]] genre_df.loc[:,'listed_in'] = genre_df.listed_in.str.split(',') genre_df = genre_df.explode('listed_in') genre_df['listed_in'] = genre_df['listed_in'].str.strip() genre_df['listed_in'] = genre_df['listed_in'].replace(["Action-Adventure","Anime Features","Classic 
Movies","Comedies","Documentaries","Dramas","Game Shows","Historical","International","Lifestyle","Music","Musical",],["Action & Adventure","Anime","Classics","Comedy","Documentary","Drama","Game Show / Competition","History","International Movies","Lifestyle & Culture","Music & Musicals","Music & Musicals"]) genre_df['listed_in'] = genre_df['listed_in'].replace(["Horror","Stand Up","Stand-Up Comedy","Sketch Comedy","Talk Show","Thrillers"],["TV Horror","Stand-Up Comedy & Talk Shows","Stand-Up Comedy & Talk Shows","Stand-Up Comedy & Talk Shows", "Stand-Up Comedy & Talk Shows","Thriller"]) genre_df.drop_duplicates(['index'],keep = "first", inplace = True) df['genre'] = genre_df['listed_in'].tolist() df.drop('listed_in', axis = 1, inplace = True) return genre_df pass def get_slice_membership(combined,platformName): """ Implement a function that computes which rows of the given dataframe should be part of the slice, and returns a boolean pandas Series that indicates 0 if the row is not part of the slice, and 1 if it is part of the slice. We are slicing based on platform. """ labels = pd.Series([1] * len(combined), index=df.index) if platformName: labels &= combined['Platform'].isin([platformName]) return labels def get_slice_genreType(df, platformName,genreName, typeName): """ Implement a function that computes which rows of the given dataframe should be part of the slice, and returns a boolean pandas Series that indicates 0 if the row is not part of the slice, and 1 if it is part of the slice. We are slicing based on platform and genre. """ labels = pd.Series([1] * len(df), index = df.index) if genreName: labels &= df['genre'].isin([genreName]) if platformName: labels &= df['Platform'].isin([platformName]) if typeName: labels &= df['type'].isin([typeName]) return labels st.title("Netflix and No Chill") #col1, col2, col3 = st.columns(3) #with col1: # st.image("netflix_logo1.jpg", width = 200) #with col2: # st.image("disney_logo.jpg", width = 200) #with col3: # st.image("hulu_logo.jpg", width = 200) st.write("VARIETY . ACCESSIBILITY . 
DOLLARS", width = 500) st.markdown("The main points of focus for OTT Platforms is to provide a wide variety of content to their viewers which is easily accessible to them at a price that binds the former two.\ To stay relevant and rule the market, platforms need to understand viewers choice of content") df = load_data() c_df = getCountryData(df) temp_df = getCountryPlatformData(df) genre_df = getGenreData(df) #map and top 10 genres - ratings source = alt.topo_feature(data.world_110m.url,'countries') click = alt.selection_single(empty = "all", fields = ['country']) world = alt.Chart(source, title = "World Map color-coded with the number of titles produced").properties(width = 800, height = 800).mark_geoshape( ).encode( color = alt.Color('Total_titles:Q',scale=alt.Scale(scheme="spectral"), legend=alt.Legend(title = "Count of Titles")), opacity = alt.condition(click, alt.value(1), alt.value(0.2)), tooltip = [ alt.Tooltip("country:N", title = "Country"), alt.Tooltip("Total_titles:Q", title = "Total Titles") ] ).transform_lookup( lookup = 'id', from_ = alt.LookupData(temp_df,'Numeric code',['Total_titles','country']) ).project( "naturalEarth1" ).properties( width = 500, height = 300 ).add_selection(click) bars_genre = alt.Chart(df).properties( width = 300, height = 300 ).mark_bar().encode( y = alt.Y("genre:N", sort = '-x', axis= alt.Axis(title="Genre")), x = alt.X("count:Q", axis = alt.Axis(title="Count of Titles")), color = alt.Color("count()", legend = None)).transform_filter(click).transform_aggregate( count = 'count()', groupby = ['genre'] ).transform_window( window = [{'op': 'rank', "as":'rank'}], sort = [{'field' : 'count', 'order': 'descending'}] ).transform_filter( ("datum.rank <= 10") ).add_selection(click) bars_rating = alt.Chart(df).properties( width = 300, height = 300 ). 
mark_bar().encode( y = alt.Y("rating:N", sort = '-x', axis = alt.Axis(title="Ratings")), x = alt.X("count()", axis = alt.Axis(title = "Count of Titles")), color = alt.Color("count()", legend = None)).transform_filter(click).add_selection(click) charts = bars_genre | bars_rating st.altair_chart(world & charts) #slicing platform_selectbox= st.selectbox("Platform",df['Platform'].unique()) membership= get_slice_membership(df,platform_selectbox) click1 = alt.selection_single(empty = "all", fields = ['type']) typeOfShow = alt.Chart(df[membership], title = "TV Shows vs Movies").properties(width = 300, height = 300).transform_joinaggregate( total = 'count(*)').transform_calculate( pct = '1/ datum.total').mark_bar(size = 100).encode( alt.X("type", title = "Type"), alt.Y('sum(pct):Q', axis = alt.Axis(format='.0%', title="Percentage of Titles"), scale=alt.Scale(domain=[0,1])), alt.Color("type") ).add_selection(click1) monthAdded = alt.Chart(df[membership], title = "Titles added across Months").properties(width= 300, height = 300).mark_bar().encode( x = alt.X("date_added_month", title="Month", sort = '-y'), y = alt.Y("count()", title = "Number of Titles Added"), color = alt.Color("type") ).transform_filter(click1).add_selection(click1) st.altair_chart(typeOfShow | monthAdded) click_legend = alt.selection_single(fields = ['genre'], bind = 'legend') total = len(df[membership]) genreOfShow = alt.Chart(df[membership], title = "A slice of Genre").properties(width = 600, height = 500).mark_arc().encode( color = alt.Color("genre:N"), theta = alt.Theta("mycount:Q"), tooltip = ['genre','percentage:O'], opacity = alt.condition(click_legend, alt.value(1), alt.value(0.2)), order = alt.Order(field="mycount", type="quantitative", sort="descending") ).transform_aggregate( groupby = ['genre'], mycount = "count()", ).transform_calculate( percentage = f"round(datum.mycount / {total} * 1000.0) / 10.0 + '%'" ).add_selection(click_legend) st.write(genreOfShow) st.title("Prediction") ans1 = "" ans1 = st.text_input("Movie Name") # load the data df1 = pd.read_csv('netflix_titles.csv') # convert to datetime df1["date_added"] =
pd.to_datetime(df1['date_added'])
pandas.to_datetime
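# get_slice_membership above builds a boolean mask by AND-ing isin() filters and
# then uses it to slice the frame. A tiny illustration of the same masking idea
# on a toy frame (the values are made up):
import pandas as pd

toy = pd.DataFrame({"Platform": ["Netflix", "Hulu", "Disney", "Netflix"],
                    "type": ["Movie", "TV Show", "Movie", "TV Show"]})
mask = pd.Series([True] * len(toy), index=toy.index)
mask &= toy["Platform"].isin(["Netflix"])
mask &= toy["type"].isin(["Movie"])
print(toy[mask])  # only the Netflix movies remain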
from urllib.request import urlopen import requests import pandas as pd from bs4 import BeautifulSoup from selenium import webdriver PATH_TO_SAVE = '/home/broilo/Documents/GitHub/Dataset/UFC-project/data-mining/' def get_rows(name): """[summary] Args: name ([type]): [description] Returns: [type]: [description] """ # Catch the HTML content from URL wiki_url = "https://pt.wikipedia.org/wiki/"+name response = requests.get(wiki_url) soup = BeautifulSoup(response.text, 'html.parser') table = soup.find('table', attrs={'class': 'wikitable sortable'}) rows = table.find_all('tr') return rows # print(get_rows(name)) def dataframe(name): """[summary] Args: name ([type]): [description] Returns: [type]: [description] """ # Builds a dataframe form the HTML content rows = get_rows(name) columns = [v.text.replace('\n', '') for v in rows[0].find_all('th')] df = pd.DataFrame(columns=columns) for i in range(2, len(rows)): tds = rows[i].find_all('td') values = [td.text.replace('\n', '') for td in tds] df = df.append(
pd.Series(values, index=columns)
pandas.Series
import argparse import numpy as np import pandas as pd import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt import os import seaborn as sns import matplotlib.dates as mdates import sys sys.path.append('../') from processing_helpers import * from load_paths import load_box_paths mpl.rcParams['pdf.fonttype'] = 42 def parse_args(): description = "Process simulation outputs to send to Civis" parser = argparse.ArgumentParser(description=description) parser.add_argument( "-stem", "--stem", type=str, help="Name of experiment and folder name", default=None, ) parser.add_argument( "-p", "--processStep", type=str, help="Only required if files are too large to process regions in a loop", default='generate_outputs', ) parser.add_argument( "-l", "--Location", type=str, help="Local or NUCLUSTER", default='Local', ) return parser.parse_args() def get_scenarioName(exp_suffix) : scenarioName = exp_suffix if exp_suffix == "reopen": scenarioName = "reopen_gradual" if exp_suffix == "gradual": scenarioName = "reopen_gradual" if exp_suffix == "interventionStop": scenarioName = "endsip" if exp_suffix == "0": scenarioName = "baseline" if exp_suffix == "neverSIP": scenarioName = "neversip" if exp_suffix == "stopSIP30": scenarioName = "july1partial30" if exp_suffix == "stopSIP10": scenarioName = "july1partial10" return(scenarioName) def plot_sim(dat,suffix,channels) : if suffix not in ["All","central","southern","northeast","northcentral"]: suffix_nr = str(suffix.split("-")[1]) if suffix == "All": suffix_nr ="illinois" capacity = load_capacity(suffix_nr) fig = plt.figure(figsize=(18, 12)) fig.subplots_adjust(right=0.97, wspace=0.2, left=0.07, hspace=0.15) palette = sns.color_palette('Set1', len(channels)) for c, channel in enumerate(channels): ax = fig.add_subplot(3, 3, c + 1) ax.plot(dat['date'], dat['%s_median' % channel], color=palette[c]) ax.fill_between(dat['date'].values, dat['%s_95CI_lower' % channel], dat['%s_95CI_upper' % channel], color=palette[c], linewidth=0, alpha=0.2) ax.fill_between(dat['date'].values, dat[ '%s_50CI_lower' % channel], dat[ '%s_50CI_upper' % channel], color=palette[c], linewidth=0, alpha=0.4) if channel in capacity.keys(): ax.plot([np.min(dat['date']), np.max(dat['date'])], [capacity[channel], capacity[channel]], '--', linewidth=2, color=palette[c]) ax.set_title(channel, y=0.85) ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%y')) plotname = f'{scenarioName}_{suffix}' plotname = plotname.replace('EMS-','covidregion_') plt.savefig(os.path.join(plot_path, plotname + '.png')) plt.savefig(os.path.join(plot_path, 'pdf', plotname + '.pdf'), format='PDF') # plt.show() def load_and_plot_data(ems_region, savePlot=True) : region_suffix = f'_{str(ems_region)}' column_list = ['startdate', 'time', 'scen_num', 'sample_num', 'run_num'] outcome_channels = ['susceptible', 'infected', 'recovered', 'infected_cumul','asymp_cumul','asymp_det_cumul', 'symp_mild_cumul', 'symp_severe_cumul', 'symp_mild_det_cumul', 'symp_severe_det_cumul', 'hosp_det_cumul', 'hosp_cumul', 'detected_cumul', 'crit_cumul', 'crit_det_cumul', 'deaths_det_cumul', 'deaths', 'crit_det', 'critical', 'hosp_det', 'hospitalized'] for channel in outcome_channels: column_list.append(channel + region_suffix) df = load_sim_data(exp_name,region_suffix = region_suffix, column_list=column_list) df['ems'] = ems_region df['ventilators'] = get_vents(df['crit_det'].values) df['new_symptomatic'] = df['new_symptomatic_severe'] + df['new_symptomatic_mild'] + df['new_detected_symptomatic_severe'] + 
df['new_detected_symptomatic_mild'] channels = ['infected', 'new_infected', 'new_symptomatic', 'new_deaths', 'new_detected_deaths', 'hospitalized', 'critical', 'hosp_det', 'crit_det', 'ventilators', 'recovered'] plotchannels = ['infected', 'new_infected', 'new_symptomatic', 'new_deaths', 'new_detected_deaths', 'hosp_det', 'crit_det', 'ventilators', 'recovered'] adf = pd.DataFrame() for c, channel in enumerate(channels): mdf = df.groupby(['date', 'ems'])[channel].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index() mdf = mdf.rename(columns={'CI_50': '%s_median' % channel, 'CI_2pt5': '%s_95CI_lower' % channel, 'CI_97pt5': '%s_95CI_upper' % channel, 'CI_25': '%s_50CI_lower' % channel, 'CI_75': '%s_50CI_upper' % channel}) if adf.empty: adf = mdf else: adf = pd.merge(left=adf, right=mdf, on=['date', 'ems']) if savePlot : plot_sim(adf, suffix=ems_region, channels=plotchannels) return adf def process_and_save(adf,ems_region, SAVE = True) : col_names = civis_colnames(reverse=False) adf = adf.rename(columns=col_names) adf.geography_modeled = adf.geography_modeled.str.replace('-', "") adf.geography_modeled = adf.geography_modeled.str.lower() adf.geography_modeled = adf.geography_modeled.str.replace('all', "illinois") adf['scenario_name'] = scenarioName dfout = adf[ ['date', 'geography_modeled', 'scenario_name', 'cases_median', 'cases_lower', 'cases_upper', 'cases_new_median', 'cases_new_lower', 'cases_new_upper', 'deaths_median', 'deaths_lower', 'deaths_upper', 'deaths_det_median', 'deaths_det_lower', 'deaths_det_upper', 'hosp_bed_median', 'hosp_bed_lower', 'hosp_bed_upper','hosp_det_bed_median', 'hosp_det_bed_lower', 'hosp_det_bed_upper', 'icu_median', 'icu_lower', 'icu_upper', 'icu_det_median', 'icu_det_lower', 'icu_det_upper', 'vent_median', 'vent_lower', 'vent_upper', 'recovered_median', 'recovered_lower', 'recovered_upper']] if SAVE : filename = "nu_" + simdate + "_" + ems_region + ".csv" rename_geography_and_save(dfout, filename=filename) return dfout def rename_geography_and_save(df,filename) : dfout = df.copy() if "geography_modeled" not in dfout.columns: dfout.rename(columns={'ems': 'covid_region'}, inplace=True) dfout['covid_region'] = dfout['covid_region'].str.replace('EMS-', '') if "geography_modeled" in dfout.columns: dfout['geography_modeled'] = dfout['geography_modeled'].str.replace('ems', 'covidregion_') dfout.to_csv(os.path.join(sim_output_path, filename), index=False, date_format='%Y-%m-%d') if __name__ == '__main__' : args = parse_args() stem = args.stem Location = args.Location processStep = args.processStep # 'generate_outputs' datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location) regions = ['All', 'EMS-1', 'EMS-2', 'EMS-3', 'EMS-4', 'EMS-5', 'EMS-6', 'EMS-7', 'EMS-8', 'EMS-9', 'EMS-10','EMS-11'] exp_names = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if stem in x] for exp_name in exp_names: simdate = exp_name.split("_")[0] exp_suffix = exp_name.split("_")[-1] scenarioName = get_scenarioName(exp_suffix) sim_output_path = os.path.join(wdir, 'simulation_output', exp_name) plot_path = os.path.join(sim_output_path, '_plots') if processStep == 'generate_outputs' : dfAll = pd.DataFrame() for reg in regions : print( f'Start processing {reg}') tdf = load_and_plot_data(reg, savePlot=True) adf = process_and_save(tdf, reg, SAVE=True) dfAll =
pd.concat([dfAll, adf])
pandas.concat
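# load_and_plot_data above summarises each channel with groupby(...).agg([...]),
# renames the aggregate columns, and merges the per-channel frames on
# ['date', 'ems']. A toy sketch of that groupby/agg/rename pattern (CI_50 and
# CI_2pt5 stand in for the project's percentile helpers):
import numpy as np
import pandas as pd

def CI_50(x):
    return np.percentile(x, 50)

def CI_2pt5(x):
    return np.percentile(x, 2.5)

toy = pd.DataFrame({"date": ["d1", "d1", "d2", "d2"], "ems": 1, "infected": [3, 5, 7, 9]})
mdf = toy.groupby(["date", "ems"])["infected"].agg([CI_50, CI_2pt5]).reset_index()
mdf = mdf.rename(columns={"CI_50": "infected_median", "CI_2pt5": "infected_95CI_lower"})
print(mdf)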
# Information/Social Epidemiology # # SPEC collaborative # # 2021 from batchrunner_local import BatchRunnerMP from multiprocessing import freeze_support from isepi import ISEpiModel from isepi import Stage from isepi import MobilityType import pandas as pd import json import sys num_procs = int(sys.argv[1]) file_params = sys.argv[2] # Read JSON file with open(file_params) as f: data = json.load(f) print(f"Scenario description") print(f"Description: { data['description'] }") print(f"Prepared by: { data['prepared-by'] }") print(f"Date: { data['date'] }") print("") print("Attempting to configure model from file...") model_params = { "epidemiology": data["model"]["epidemiology"] } var_params = {"dummy": range(25,50,25)} num_iterations = data["ensemble"]["runs"] num_steps = data["ensemble"]["days"] if __name__ == "__main__": freeze_support() batch_run = BatchRunnerMP( ISEpiModel, nr_processes=num_procs, fixed_parameters=model_params, variable_parameters=var_params, iterations=num_iterations, max_steps=num_steps*ISEpiModel.dwell_15_day, model_reporters={ "Step": compute_stepno, "Susceptible": compute_susceptible, "Incubating": compute_incubating, "Asymptomatic": compute_asymptomatic, "SymptQuarantined": compute_symptdetected, "AsymptQuarantined": compute_asymptdetected, "Severe": compute_severe, "Recovered": compute_recovered, "Deceased": compute_deceased, "CummulPrivValue": compute_cumul_private_value, "CummulPublValue": compute_cumul_public_value, "CummulTestCost": compute_cumul_testing_cost, "Rt": compute_eff_reprod_number, "Employed": compute_employed, "Unemployed": compute_unemployed }, display_progress=True) print("Parametrization complete:") print("") print(json.dumps(data, indent=3)) print("") print(f"Executing an ensemble of size {num_iterations} using {num_steps} steps with {num_procs} machine cores...") cm_runs = batch_run.run_all() print("") print("Saving results to file...") ldfs = [] i = 0 for cm in cm_runs.values(): cm["Iteration"] = i ldfs.append(cm) i = i + 1 file_out = data["output"]["prefix"] dfs =
pd.concat(ldfs)
pandas.concat
# tests.test_features.test_jointplot # Test the JointPlot Visualizer # # Author: <NAME> # Created: Mon Apr 10 21:00:54 2017 -0400 # # Copyright (C) 2017 The scikit-yb developers. # For license information, see LICENSE.txt # # ID: test_jointplot.py [9e008b0] <EMAIL> $ """ Test joint plot visualization methods. These tests work differently depending on what version of matplotlib is installed. If version 2.0.2 or greater is installed, then most tests will execute, otherwise the histogram tests will skip and only the warning will be tested. """ ########################################################################## ## Imports ########################################################################## import sys import pytest import numpy as np from functools import partial from unittest.mock import patch, MagicMock from sklearn.datasets import make_classification, make_regression from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase from yellowbrick.exceptions import YellowbrickValueError from yellowbrick.features.jointplot import * from ..fixtures import Dataset try: # Only available in Matplotlib >= 2.0.2 from mpl_toolkits.axes_grid1 import make_axes_locatable except ImportError: make_axes_locatable = None try: import pandas as pd except ImportError: pd = None ########################################################################## ## Fixtures ########################################################################## # Random numpy array generators rand1d = partial(np.random.rand, 120) rand2col = partial(np.random.rand, 120, 2) rand3col = partial(np.random.rand, 120, 3) @pytest.fixture(scope="class") def discrete(request): """ Creates a simple 2-column dataset with a discrete target. """ X, y = make_classification( n_samples=120, n_features=2, n_informative=2, n_redundant=0, n_classes=3, n_clusters_per_class=1, random_state=2221, ) request.cls.discrete = Dataset(X, y) @pytest.fixture(scope="class") def continuous(request): """ Creates a simple 2-column dataset with a continuous target. 
""" X, y = make_regression(n_samples=120, n_features=2, random_state=1112) request.cls.continuous = Dataset(X, y) ########################################################################## ## JointPlot Tests ########################################################################## @pytest.mark.usefixtures("discrete", "continuous") class TestJointPlotNoHistogram(VisualTestCase): """ Test the JointPlot visualizer without histograms """ def test_invalid_columns_values(self): """ Assert invalid columns arguments raise exception """ with pytest.raises(YellowbrickValueError, match="invalid for joint plot"): JointPlot(columns=["a", "b", "c"], hist=False) def test_invalid_correlation_values(self): """ Assert invalid correlation arguments raise an exception """ with pytest.raises(YellowbrickValueError, match="invalid correlation method"): JointPlot(correlation="foo", hist=False) def test_invalid_kind_values(self): """ Assert invalid kind arguments raise exception """ for bad_kind in ("foo", None, 123): with pytest.raises(YellowbrickValueError, match="invalid joint plot kind"): JointPlot(kind=bad_kind, hist=False) def test_invalid_hist_values(self): """ Assert invalid hist arguments raise exception """ for bad_hist in ("foo", 123): with pytest.raises( YellowbrickValueError, match="invalid argument for hist" ): JointPlot(hist=bad_hist) def test_no_haxes(self): """ Test that xhax and yhax are not available """ oz = JointPlot(hist=False) with pytest.raises(AttributeError, match="histogram for the X axis"): oz.xhax with pytest.raises(AttributeError, match="histogram for the Y axis"): oz.yhax @patch("yellowbrick.features.jointplot.plt") def test_correlation(self, mplt): """ Test correlation is correctly computed """ x = self.discrete.X[:, 0] y = self.discrete.X[:, 1] cases = ( ("pearson", -0.3847799883805261), ("spearman", -0.37301201472324463), ("covariance", -0.5535440619953924), ("kendalltau", -0.2504201680672269), ) for alg, expected in cases: oz = JointPlot(hist=False, correlation=alg, columns=None) oz.ax = MagicMock() oz.fit(x, y) assert hasattr(oz, "corr_") assert oz.corr_ == pytest.approx( expected ), "{} not computed correctly".format(alg) def test_columns_none_invalid_x(self): """ When self.columns=None validate X and y """ bad_kws = ( {"X": rand1d(), "y": None}, {"X": rand3col(), "y": None}, {"X": rand2col(), "y": rand1d()}, {"X": rand3col(), "y": rand1d()}, {"X": rand1d(), "y": rand2col()}, ) for kws in bad_kws: oz = JointPlot(columns=None, hist=False) with pytest.raises( YellowbrickValueError, match="when self.columns is None" ): oz.fit(**kws) def test_columns_none_x_y(self): """ When self.columns=None image similarity with valid X and y """ oz = JointPlot(hist=False, columns=None) assert oz.fit(self.discrete.X[:, 0], self.discrete.y) is oz assert hasattr(oz, "corr_") oz.finalize() # Appveyor and Linux conda fail due to non-text-based differences self.assert_images_similar(oz, tol=2.5) def test_columns_none_x(self): """ When self.columns=None image similarity with valid X, no y """ oz = JointPlot(hist=False, columns=None) assert oz.fit(self.discrete.X) is oz assert hasattr(oz, "corr_") oz.finalize() tol = ( 4.0 if sys.platform == "win32" else 0.01 ) # Fails on AppVeyor with RMS 3.941 self.assert_images_similar(oz, tol=tol) def test_columns_single_index_no_y(self): """ When self.columns=int or str y must not be None """ oz = JointPlot(columns="foo", hist=False) with pytest.raises(YellowbrickValueError, match="y must be specified"): oz.fit(rand2col(), y=None) def 
test_columns_single_invalid_index_numpy(self): """ When self.columns=int validate the index in X """ oz = JointPlot(columns=2, hist=False) with pytest.raises(IndexError, match="could not index column '2' into type"): oz.fit(self.continuous.X, self.continuous.y) @pytest.mark.skipif(pd is None, reason="test requires pandas") def test_columns_single_invalid_index_pandas(self): """ When self.columns=str validate the index in X """ oz = JointPlot(columns="foo", hist=False) X = pd.DataFrame(self.continuous.X, columns=["a", "b"]) y = pd.Series(self.continuous.y) with pytest.raises(IndexError, match="could not index column 'foo' into type"): oz.fit(X, y) def test_columns_single_int_index_numpy(self): """ When self.columns=int image similarity on numpy dataset """ oz = JointPlot(columns=1, hist=False) assert oz.fit(self.continuous.X, self.continuous.y) is oz assert hasattr(oz, "corr_") oz.finalize() # Appveyor and Linux conda failed based on non-text-based differences self.assert_images_similar(oz, tol=5) @pytest.mark.skipif(pd is None, reason="test requires pandas") def test_columns_single_str_index_pandas(self): """ When self.columns=str image similarity on pandas dataset """ oz = JointPlot(columns="a", hist=False) X = pd.DataFrame(self.continuous.X, columns=["a", "b"]) y = pd.Series(self.continuous.y) assert oz.fit(X, y) is oz assert hasattr(oz, "corr_") oz.finalize() # Appveyor and Linux conda failed based on non-text-based differences self.assert_images_similar(oz, tol=5.5) def test_columns_double_int_index_numpy_no_y(self): """ When self.columns=[int, int] image similarity on numpy dataset no y """ oz = JointPlot(columns=[0, 1], hist=False) assert oz.fit(self.discrete.X, y=None) is oz assert hasattr(oz, "corr_") oz.finalize() tol = ( 4.0 if sys.platform == "win32" else 0.01 ) # Fails on AppVeyor with RMS 3.941 self.assert_images_similar(oz, tol=tol) @pytest.mark.skipif(pd is None, reason="test requires pandas") def test_columns_double_str_index_pandas_no_y(self): """ When self.columns=[str, str] image similarity on pandas dataset no y """ oz = JointPlot(columns=["a", "b"], hist=False) X = pd.DataFrame(self.continuous.X, columns=["a", "b"]) assert oz.fit(X, y=None) is oz assert hasattr(oz, "corr_") oz.finalize() tol = ( 4.0 if sys.platform == "win32" else 0.01 ) # Fails on AppVeyor with RMS 3.911 self.assert_images_similar(oz, tol=tol) @pytest.mark.skipif(pd is None, reason="test requires pandas") def test_columns_double_index_discrete_y(self): """ When self.columns=[str, str] on DataFrame with discrete y """ oz = JointPlot(columns=["a", "b"], hist=False) X = pd.DataFrame(self.discrete.X, columns=["a", "b"]) y =
pd.Series(self.discrete.y)
pandas.Series
import sys sys.path.insert(0, '../LIMEaid/LIMEaid') sys.path.insert(0, '../LIMEaid/LIMEaid/model') import load_college_dataset as lcd import pandas as pd from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegressionCV from sklearn.model_selection import train_test_split from sklearn.exceptions import ConvergenceWarning from sklearn.tree import DecisionTreeClassifier from sklearn import preprocessing import warnings COLUMNTODROP = 'SalaryClass' def fit_multiclass_logistic_regression(printscore=False): """ This function fits sklearn's multiclass logistic regression on the college dataset and returns the model The data values are first scaled using MinMaxScaler and then split into train and test sets before using for fitting ML model """ dataset = lcd.load_college_dataset() x = dataset.drop(COLUMNTODROP, 1) pre_scale_data = x.values min_max_scaler = preprocessing.MinMaxScaler() scaled = min_max_scaler.fit_transform(pre_scale_data) x = pd.DataFrame(scaled) y = dataset[COLUMNTODROP] x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0) warnings.filterwarnings("ignore", category=ConvergenceWarning) clf = LogisticRegressionCV(cv=10, random_state=0, multi_class='multinomial').fit(x_train, y_train) # print the accuracy score if the print flag is true if printscore is True: print(clf.score(x_test, y_test)) return clf def fit_random_forest(printscore=False): """ This function fits sklearn's random forest classifier on the college dataset and returns the model The data values are first scaled using MinMaxScaler and then split into train and test sets before using for fitting ML model """ dataset = lcd.load_college_dataset() x = dataset.drop(COLUMNTODROP, 1) pre_scale_data = x.values min_max_scaler = preprocessing.MinMaxScaler() scaled = min_max_scaler.fit_transform(pre_scale_data) x =
pd.DataFrame(scaled)
pandas.DataFrame
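# Both helpers above follow the same recipe: MinMax-scale the features, split
# into train/test, fit a classifier, and optionally print the test score. A
# compact sketch of that recipe on synthetic data (shapes, random_state and the
# classifier choice are arbitrary):
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier

X = np.random.default_rng(3).normal(size=(200, 5))
y = (X[:, 0] > 0).astype(int)
X_scaled = MinMaxScaler().fit_transform(X)
x_train, x_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(x_train, y_train)
print(clf.score(x_test, y_test))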
import pandas as pd
import numpy as np


def convert_series_data_2_supervised(data, in_node, out_node, dropna=True):
    """
    Convert input sequence data into a supervised-learning format using pandas.

    Parameters:
    data: data to convert; can be a list for a univariate (single-feature) series,
          or a numpy array for multivariate (multi-feature) data
    in_node: length of the input sequence to use (e.g. 1 to predict t from t-1,
             2 to predict t from t-2, t-1, ...etc.)
    out_node: length of the sequence to predict (1 means predict t, 2 means predict t and t+1, ...etc.)
    dropna: whether to drop NA rows; since pandas' shift method is used to move the series,
            the first or last few rows will contain NA values. Dropped by default.
    """
    # Number of features (variables)
    num_ftrs = 1 if isinstance(data, (list)) else data.shape[1]
    # First convert the input data into a pandas DataFrame
    df =
pd.DataFrame(data)
pandas.DataFrame
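# The function above is cut off right after the initial DataFrame conversion.
# A minimal sketch of how the shift-based conversion is typically completed
# (a common pattern, not necessarily the original author's exact code):
import pandas as pd

def series_to_supervised(data, in_node=1, out_node=1, dropna=True):
    num_ftrs = 1 if isinstance(data, list) else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = [], []
    # input sequence: t-in_node, ..., t-1
    for i in range(in_node, 0, -1):
        cols.append(df.shift(i))
        names += [f"var{j + 1}(t-{i})" for j in range(num_ftrs)]
    # forecast sequence: t, t+1, ..., t+out_node-1
    for i in range(out_node):
        cols.append(df.shift(-i))
        names += [f"var{j + 1}(t)" if i == 0 else f"var{j + 1}(t+{i})" for j in range(num_ftrs)]
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    return agg.dropna() if dropna else agg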
# -*- coding: utf-8 -*- import os import win32com.client import pandas as pd from pandas import Series, DataFrame from eunjeon import Mecab # from konlpy.tag import Komoran # For Test(2021-02-21) # from datetime import datetime import datetime import re import argparse import time import multiprocessing import numpy as np _version_ = '0.40' # Version History # v0.40(2021-08-29): MS Word, PowerPoint, Text 파일에서 단어 추출후 "단어빈도" 시트에 출처(Source) 항목 추가 # v0.30(2021-04-26): DB table, column comment 파일에서 단어 추출후 "단어빈도" 시트에 출처(Source) 항목 추가 # v0.20(2021-02-21): Multiprocessing 적용 버전 # v0.10(2021-01-10): 최초 작성 버전 def get_word_list(df_text) -> DataFrame: """ text 추출결과 DataFrame에서 명사를 추출하여 최종 output을 DataFrame type으로 return :param df_text: 파일에서 추출한 text(DataFrame type) :return: 명사, 복합어(1개 이상의 명사, 접두사+명사+접미사) 추출결과(Dataframe type) """ start_time = time.time() df_result = DataFrame() tagger = Mecab() # tagger = Komoran() row_idx = 0 for index, row in df_text.iterrows(): row_idx += 1 if row_idx % 100 == 0: # 100건마다 현재 진행상태 출력 print('[pid:%d] current: %d, total: %d, progress: %3.2f%%' % (os.getpid(), row_idx, df_text.shape[0], round(row_idx / df_text.shape[0] * 100, 2))) file_name = row['FileName'] file_type = row['FileType'] page = row['Page'] text = str(row['Text']) source = (row['Source']) is_db = True if row['FileType'] in ('table', 'column') else False is_db_table = True if row['FileType'] == 'table' else False is_db_column = True if row['FileType'] == 'column' else False if is_db: db = row['DB'] schema = row['Schema'] table = row['Table'] if is_db_column: column = row['Column'] if text is None or text.strip() == '': continue try: # nouns = mecab.nouns(text) # [O]ToDo: 연속된 체언접두사(XPN), 명사파생접미사(XSN) 까지 포함하여 추출 # [O]ToDo: 명사(NNG, NNP)가 연속될 때 각각 명사와 연결된 복합명사 함께 추출 text_pos = tagger.pos(text) words = [pos for pos, tag in text_pos if tag in ['NNG', 'NNP', 'SL']] # NNG: 일반명사, NNP: 고유명사 pos_list = [x for (x, y) in text_pos] tag_list = [y for (x, y) in text_pos] pos_str = '/'.join(pos_list) + '/' tag_str = '/'.join(tag_list) + '/' iterator = re.finditer('(NNP/|NNG/)+(XSN/)*|(XPN/)+(NNP/|NNG/)+(XSN/)*|(SL/)+', tag_str) for mo in iterator: x, y = mo.span() if x == 0: start_idx = 0 else: start_idx = tag_str[:x].count('/') end_idx = tag_str[:y].count('/') sub_pos = '' # if end_idx - start_idx > 1 and not (start_idx == 0 and end_idx == len(tag_list)): if end_idx - start_idx > 1: for i in range(start_idx, end_idx): sub_pos += pos_list[i] # print('%s[sub_pos]' % sub_pos) words.append('%s[복합어]' % sub_pos) # 추가 형태소 등록 if len(words) >= 1: # print(nouns, text) for word in words: # print(noun, '\t', text) if not is_db: # sr_text = Series([file_name, file_type, page, text, word], # index=['FileName', 'FileType', 'Page', 'Text', 'Word']) df_word = DataFrame( {'FileName': [file_name], 'FileType': [file_type], 'Page': [page], 'Text': [text], 'Word': [word], 'Source': [source]}) elif is_db_table: # sr_text = Series([file_name, file_type, page, text, word, db, schema, table], # index=['FileName', 'FileType', 'Page', 'Text', 'Word', 'DB', 'Schema', 'Table']) df_word = DataFrame( {'FileName': [file_name], 'FileType': [file_type], 'Page': [page], 'Text': [text], 'Word': [word], 'DB': [db], 'Schema': [schema], 'Table': [table], 'Source': [source]}) elif is_db_column: # sr_text = Series([file_name, file_type, page, text, word, db, schema, table, column], # index=['FileName', 'FileType', 'Page', 'Text', 'Word', 'DB', 'Schema', 'Table', 'Column']) df_word = DataFrame( {'FileName': [file_name], 'FileType': [file_type], 'Page': [page], 
'Text': [text], 'Word': [word], 'DB': [db], 'Schema': [schema], 'Table': [table], 'Column': [column], 'Source': [source]})
                # df_result = df_result.append(sr_text, ignore_index=True)
                # Todo: change append to concat
                df_result =
pd.concat([df_result, df_word], ignore_index=True)
pandas.concat
#!/usr/bin/env python # coding: utf-8 # # Coding vs non-coding # K-mer count features extracted from GenCode pc and nc RNA. # Use GenCode 34. # Use one RNA per gene; the one transcript with median length (use floor where count is even). # Same process for protein coding and non-coding. # * Start with GenCode 34 fasta file. # * Run gencode_preprocess.py to make all caps and remove long seqs, short seqs, seqs with N. For each gene with multiple transcripts, choose the one transcript with median length. Among the remaining, remove any sequences that are duplicates of previous ones (We find up to 7 exact duplicate sequences per sequence identifier). # * Run spot_dupes to make sure the dupes are gone. # * Run fasta_to_feature.py to generate CSV file of K-mer counts. # * Run trivial_kmer_counter.py on subset of sequences to verify the K-mer counting. import numpy as np import pandas as pd import matplotlib.pyplot as plt import sys OUTPREFIX="ncRNA.pcRNA" ncfile='ncRNA.2mer.features.csv' pcfile='pcRNA.2mer.features.csv' if len(sys.argv)==3: ncfile=sys.argv[1] pcfile=sys.argv[2] else: print("Wrong number of command-line arguments.") print("Using default input filenames") print("Reading "+ncfile) nc_features = pd.read_csv(ncfile,header=0) print("Reading "+pcfile) pc_features = pd.read_csv(pcfile,header=0) print("Will write files like "+OUTPREFIX+".*.pkl") print("Working...") # ## Generate train set, test set # Introduce the labels 0=non-coding, 1=protein-coding. # We are worried that the longest sequences are a special case. # Use stratified split to ensure an even split of train/test by sequence length. # Manufacture labels for the two datasets nc_labels_temp=[0]*nc_features.shape[0] pc_labels_temp=[1]*pc_features.shape[0] nc_labels=pd.core.frame.DataFrame(nc_labels_temp,columns=['label']) pc_labels=pd.core.frame.DataFrame(pc_labels_temp,columns=['label']) nc_all=
pd.concat([nc_labels,nc_features],axis='columns')
pandas.concat
import pandas as pd import datetime import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates from pandas.plotting import scatter_matrix import yfinance as yf #import talib #%matplotlib inline start = '2017-02-19' end = '2022-2-19' sp500 = yf.download('^GSPC', start, end) # Moving Averages https://www.analyticsvidhya.com/blog/2021/07/stock-prices-analysis-with-python/#h2_5 def MA(data_frame, days): name = 'MA'+str(days) data_frame[name] = data_frame['close'].rolling(days).mean() return data_frame # RSI https://wire.insiderfinance.io/calculate-rsi-with-python-and-yahoo-finance-c8fb78b1c199 def RSI(data, window = 14, adjust = False): delta = data['Close'].diff(1).dropna() loss = delta.copy() gains = delta.copy() gains[gains < 0] = 0 loss[loss > 0] = 0 gain_ewm = gains.ewm(com = window - 1, adjust = adjust).mean() loss_ewm = abs(loss.ewm(com = window - 1, adjust = adjust).mean()) RS = gain_ewm / loss_ewm RSI = 100 - 100/ (1 + RS) return RSI reversed_df = sp500.iloc[::-1] #sp500['RSI'] = talib.RSI(reversed_df['Close'], 14) locator = mdates.MonthLocator(interval = 3) fmt = mdates.DateFormatter('%b') #KDJ https://github.com/Abhay64/KDJ-Indicator array_close = np.array(sp500['Close']) array_high = np.array(sp500['High']) array_low = np.array(sp500['Low']) z = 0 y = 0 highest = 0 lowest = 0 kperiods = 13 #kperiods are 14 array start from 0 index array_highest = [] array_lowest = [] for i in range(0, array_high.size - kperiods): highest = array_high[y] for j in range(0, kperiods): if(highest < array_high[y + 1]): highest = array_high[y + 1] y = y + 1 # creating list highest of k periods array_highest.append(highest) y = y - (kperiods - 1) for i in range(0, array_low.size - kperiods): lowest = array_low[z] for j in range(0, kperiods): if(lowest > array_low[z + 1]): lowest = array_low[z + 1] z = z + 1 # creating list lowest of k periods array_lowest.append(lowest) # skip one from starting after each iteration z = z - (kperiods - 1) #KDJ (K line, D line, J line) # K line Kvalue = [] for i in range(kperiods,array_close.size): k = ((array_close[i] - array_lowest[i - kperiods]) * 100 / (array_highest[i - kperiods] - array_lowest[i - kperiods])) Kvalue.append(k) sp500['K'] = pd.Series(Kvalue) # D line x = 0 # dperiods for calculate d values dperiods = 3 Dvalue = [None, None] mean = 0 for i in range(0, len(Kvalue) - dperiods + 1): sum = 0 for j in range(0, dperiods): sum = Kvalue[x] + sum x = x + 1 mean = sum / dperiods # d values for %d line adding in the list Dvalue Dvalue.append(mean) # skip one from starting after each iteration x = x - (dperiods - 1) sp500['D'] = pd.Series(Dvalue) # J line Jvalue = [None, None] for i in range(0, len(Dvalue) - dperiods + 1): j = (Dvalue[i + 2] * 3) - (Kvalue[i + 2] * 2) # j values for %j line Jvalue.append(j) sp500['J'] = pd.Series(Jvalue) # SP500 sp500_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/SP500_7yr_daily.csv') sp500_frame = pd.DataFrame(sp500_data, columns = ['ticker', 'descr', 'date', 'close', 'retx']) MA(sp500_frame, 50) MA(sp500_frame, 200) # XLB xlb_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLB_7yr_daily.csv') xlb_frame = pd.DataFrame(xlb_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xlb_frame, 50) MA(xlb_frame, 200) # XLC xlc_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLC_7yr_daily.csv') xlc_frame = pd.DataFrame(xlc_data, columns = ['ticker', 'descr', 'date', 'low', 
'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xlc_frame, 50) MA(xlc_frame, 200) # XLE xle_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLE_7yr_daily.csv') xle_frame = pd.DataFrame(xle_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xle_frame, 50) MA(xle_frame, 200) # XLF xlf_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLF_7yr_daily.csv') xlf_frame = pd.DataFrame(xlf_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xlf_frame, 50) MA(xlf_frame, 200) # XLI xli_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLI_7yr_daily.csv') xli_frame = pd.DataFrame(xli_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xli_frame, 50) MA(xli_frame, 200) # XLK xlk_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLK_7yr_daily.csv') xlk_frame = pd.DataFrame(xlk_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xlk_frame, 50) MA(xlk_frame, 200) # XLP xlp_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLP_7yr_daily.csv') xlp_frame = pd.DataFrame(xlp_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xlp_frame, 50) MA(xlp_frame, 200) # XLRE xlre_data = pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLRE_7yr_daily.csv') xlre_frame = pd.DataFrame(xlre_data, columns = ['ticker', 'descr', 'date', 'low', 'high', 'close', 'vol', 'ret', 'bid', 'ask', 'retx']) MA(xlre_frame, 50) MA(xlre_frame, 200) # XLU xlu_data =
pd.read_csv('~/Documents/Github/IndustryPricePrediction/data/sectors/XLU_7yr_daily.csv')
pandas.read_csv
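The row above stops just before loading the XLU sector file, and the target API is pandas.read_csv. A minimal, self-contained sketch of that call, assuming a tiny in-memory CSV (via StringIO) as a stand-in for the repo-local file path, with illustrative column names:

from io import StringIO
import pandas as pd

csv_text = "ticker,date,close\nXLU,2021-01-04,62.10\nXLU,2021-01-05,62.85\n"
# Same call shape as the completion, but reading from a buffer instead of a local path.
xlu_frame = pd.read_csv(StringIO(csv_text), parse_dates=["date"])
print(xlu_frame.dtypes)  # 'date' parsed to datetime64[ns], 'close' to float64

# The snippet's MA() helper then takes a rolling mean over the 'close' column:
xlu_frame["MA2"] = xlu_frame["close"].rolling(2).mean()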
""" Module for building a complete dataset from local directory with csv files. """ import os import sys from os.path import isfile, join import pandas as pd import json import numpy as np from logbook import Logger, StreamHandler from pandas import read_csv, Timedelta from zipline.utils.calendars import deregister_calendar, get_calendar, register_calendar from zipline.utils.cli import maybe_show_progress from zipline.utils.calendars import ExchangeCalendarFromDate from zipline.assets import AssetDBWriter from zipline.data.minute_bars import BcolzMinuteBarWriter from zipline.data.us_equity_pricing import BcolzDailyBarWriter, SQLiteAdjustmentWriter, BcolzDailyBarReader from . import core as bundles handler = StreamHandler(sys.stdout, format_string=" | {record.message}") logger = Logger(__name__) logger.handlers.append(handler) def xnse_equities(configpath=None): """ Generate an ingest function for custom data bundle This function can be used in ~/.zipline/extension.py to register bundle with custom parameters, e.g. with a custom trading calendar. Parameters ---------- tframes: tuple, optional The data time frames, supported timeframes: 'daily' and 'minute' csvdir : string, optional, default: CSVDIR environment variable The path to the directory of this structure: <directory>/<timeframe1>/<symbol1>.csv <directory>/<timeframe1>/<symbol2>.csv <directory>/<timeframe1>/<symbol3>.csv <directory>/<timeframe2>/<symbol1>.csv <directory>/<timeframe2>/<symbol2>.csv <directory>/<timeframe2>/<symbol3>.csv Returns ------- ingest : callable The bundle ingest function Examples -------- This code should be added to ~/.zipline/extension.py .. code-block:: python from zipline.data.bundles import csvdir_equities, register register('custom-csvdir-bundle', csvdir_equities(["daily", "minute"], '/full/path/to/the/csvdir/directory')) """ return CSVDIRBundleXNSE(configpath).ingest class CSVDIRBundleXNSE: """ Wrapper class to call csvdir_bundle with provided list of time frames and a path to the csvdir directory """ def _read_config(self, configpath): with open(configpath) as configfile: config = json.load(configfile) self.meta_path=config["META_PATH"] self.bundle_path=config["BUNDLE_PATH"] self.daily_path=config["DAILY_PATH"] self.asset_db_name=config["ASSET_DB"] self.adjustment_db_name=config["ADJUSTMENT_DB"] self.metadata_file=config["META_DATA"] self.bizdays_file=config["BIZDAYLIST"] self.symlist_file=config["SYMLIST"] self.benchmark_file=config["BENCHMARKDATA"] self.benchmar_symbol=config["BENCHMARK_SYM"] self.calendar_name=config["CALENDAR_NAME"] self.calendar_tz=config["CALENDAR_TZ"] self.cal_session_start=config["SESSION_START"] self.cal_session_end=config["SESSION_END"] self.cal_minutes_per_day=config["MINUTES_PER_DAY"] def __init__(self, configpath=None): self._read_config(configpath) self.bizdays = self._read_bizdays(join(self.meta_path,self.bizdays_file)) self.calendar = self._create_calendar( self.calendar_name, self.calendar_tz, tuple(self.cal_session_start), tuple(self.cal_session_end), self.bizdays) self.minute_bar_path = join(self.bundle_path,"minute") self.daily_bar_path = join(self.bundle_path,"daily") self.asset_db_path = join(self.bundle_path,self.asset_db_name) self.adjustment_db_path = join(self.bundle_path,self.adjustment_db_name) self.meta_data = self._read_asset_db() self.syms = self._read_allowed_syms() def _read_allowed_syms(self): return self.meta_data['symbol'].tolist() def _read_bizdays(self, strpathmeta): dts = [] if not isfile(strpathmeta): raise ValueError('Business days list is 
missing') else: dts = read_csv(strpathmeta) #dts = dts['dates'].tolist() dts = pd.to_datetime(dts['dates']).tolist() return sorted(set(dts)) def _create_calendar(self, cal_name,tz,session_start,session_end,dts): cal = ExchangeCalendarFromDate(cal_name,tz,session_start,session_end,dts) try: deregister_calendar(self.calendar_name) get_calendar(self.calendar_name) except: register_calendar(self.calendar_name, cal) return get_calendar(self.calendar_name) def _read_asset_db(self): if not isfile(join(self.meta_path,self.symlist_file)): raise ValueError('symbols metadata list is missing') meta_data = pd.read_csv(join(self.meta_path,self.symlist_file)) meta_data.loc[len(meta_data)] = self.benchmar_symbol,self.benchmar_symbol,self.bizdays[0],self.bizdays[-1] meta_data['start_date'] = pd.to_datetime(meta_data['start_date']) meta_data['end_date'] = pd.to_datetime(meta_data['end_date']) meta_data['auto_close_date'] = pd.to_datetime([e+pd.Timedelta(days=1) for e in meta_data['end_date'].tolist()]) meta_data['exchange'] = 'NSE' return(meta_data) def ingest(self, environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): self.calendar = self._create_calendar( self.calendar_name, self.calendar_tz, self.cal_session_start, self.cal_session_end, self._read_bizdays(join(self.meta_path,self.bizdays_file))) xnse_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, self.calendar, start_session, end_session, cache, show_progress, output_dir, self.daily_path, self.minute_bar_path, self.daily_bar_path, self.asset_db_path, self.adjustment_db_path, self.meta_data, self.meta_path, self.syms, self.bizdays, self.cal_minutes_per_day, self.benchmar_symbol) @bundles.register("XNSE", create_writers=False) def xnse_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, csvdir=None, minute_bar_path = None, daily_bar_path = None, asset_db_path = None, adjustment_db_path = None, meta_data = None, meta_path = None, syms = None, bizdays = None, minutes_per_day = None, benchmark_symbol = None): """ Build a zipline data bundle from the directory with csv files. 
""" if not csvdir: raise ValueError("input data directory missing") if not os.path.isdir(csvdir): raise ValueError("%s is not a directory" % csvdir) minute_bar_writer = BcolzMinuteBarWriter(minute_bar_path, calendar, start_session, end_session, minutes_per_day, benchmark_symbol) daily_bar_writer = BcolzDailyBarWriter(daily_bar_path, calendar, start_session, end_session) asset_db_writer = AssetDBWriter(asset_db_path) daily_bar_writer.write(_pricing_iter(csvdir, syms, meta_data, bizdays, show_progress),show_progress=show_progress) meta_data = meta_data.dropna() meta_data = meta_data.reset_index(drop=True) _write_meta_data(asset_db_writer,asset_db_path,meta_data) _write_adjustment_data(adjustment_db_path,meta_data,syms,daily_bar_path, calendar.all_sessions, bizdays, meta_path) def _write_meta_data(asset_db_writer,asset_db_path,meta_data): try: os.remove(asset_db_path) except: pass asset_db_writer.write(equities=meta_data) def _write_adjustment_data(adjustment_db_path,meta_data,syms,daily_bar_path, cal_sessions,bizdays, meta_path): try: os.remove(adjustment_db_path) except: pass adjustment_writer = SQLiteAdjustmentWriter(adjustment_db_path, BcolzDailyBarReader(daily_bar_path), cal_sessions, overwrite=True) meta_dict = dict(zip(meta_data['symbol'].tolist(),range(len(meta_data)))) mergers = pd.read_csv(join(meta_path,"mergers.csv"),parse_dates=[0]) mergers['effective_date'] = pd.to_datetime(mergers['effective_date']) mergers['sid'] = [meta_dict.get(sym,-1) for sym in mergers['symbol'].tolist()] mergers =mergers.drop(['symbol'],axis=1) mergers = mergers[mergers['sid'] != -1] splits = pd.read_csv(join(meta_path,"splits.csv"),parse_dates=[0]) splits['effective_date'] = pd.to_datetime(splits['effective_date']) splits['sid'] = [meta_dict.get(sym,-1) for sym in splits['symbol'].tolist()] splits =splits.drop(['symbol'],axis=1) splits = splits[splits['sid'] != -1] dividends = pd.read_csv(join(meta_path,"dividends.csv"),parse_dates=[0]) dividends['ex_date'] = pd.to_datetime(dividends['ex_date']) dividends['declared_date'] = pd.to_datetime(dividends['declared_date']) dividends['pay_date'] = pd.to_datetime(dividends['pay_date']) dividends['record_date'] = pd.to_datetime(dividends['record_date']) dividends['sid'] = [meta_dict.get(sym,-1) for sym in dividends['symbol'].tolist()] dividends =dividends.drop(['symbol'],axis=1) dividends = dividends[dividends['sid'] != -1] adjustment_writer.write(splits=splits, mergers=mergers, dividends=dividends) def _pricing_iter(csvdir, symbols, meta_data, bizdays, show_progress): with maybe_show_progress(symbols, show_progress, label='Loading custom pricing data: ') as it: files = os.listdir(csvdir) sid = -1 for sid_count, symbol in enumerate(it): logger.debug('%s: sid %s' % (symbol, sid_count)) try: fname = [fname for fname in files if '%s.csv' % symbol in fname][0] except IndexError: raise ValueError("%s.csv file is not in %s" % (symbol, csvdir)) dfr = read_csv(os.path.join(csvdir, fname), parse_dates=[0], infer_datetime_format=True, index_col=0).sort_index() if len(dfr) == 0: print('removing {} as we have no data rows'.format(symbol)) meta_data.loc[meta_data.symbol==symbol,'symbol'] = np.nan continue start_date = pd.to_datetime(meta_data.loc[meta_data.symbol==symbol,'start_date']) end_date = pd.to_datetime(meta_data.loc[meta_data.symbol==symbol,'end_date']) dfr = ensure_all_days(dfr,start_date,end_date, bizdays) if len(dfr) == 0: print('removing {} as we have no data rows'.format(symbol)) meta_data.loc[meta_data.symbol==symbol,'symbol'] = np.nan continue sid = sid + 1 
yield sid, dfr def ensure_all_days(dfr, start_date, end_date, bizdays): start_date = start_date.iloc[0] end_date = end_date.iloc[0] delta = end_date - start_date dts = [(start_date + Timedelta(days=x)) for x in range(0, delta.days+1)] dts = pd.to_datetime(dts) bizdays =
pd.to_datetime(bizdays)
pandas.to_datetime
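Here the completion converts the bundle's business-day column with pandas.to_datetime before deduplicating and sorting it. A small sketch of that pattern, with a hard-coded date list standing in for the CSV column:

import pandas as pd

raw = {"dates": ["2016-01-06", "2016-01-04", "2016-01-05", "2016-01-04"]}
dts = pd.to_datetime(pd.Series(raw["dates"]))   # Series of strings -> datetime64[ns]
bizdays = sorted(set(dts.tolist()))             # unique, sorted pd.Timestamp objects, as in _read_bizdays
print(bizdays[0], "...", bizdays[-1])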
import databricks.koalas as ks import numpy as np # These function can return a Column Expression or a list of columns expression # Must return None if the data type can not be handle import pandas as pd from pyspark.sql import functions as F from optimus.engines.base.commons.functions import word_tokenize from optimus.engines.base.dataframe.functions import DataFrameBaseFunctions from optimus.engines.base.pandas.functions import PandasBaseFunctions from optimus.helpers.core import val_to_list, one_list_to_val from optimus.helpers.raiseit import RaiseIt class SparkFunctions(PandasBaseFunctions, DataFrameBaseFunctions): _engine = ks def _to_float(self, series): """ Converts a series values to floats """ return series.astype("float") def _to_integer(self, series, default=0): """ Converts a series values to integers """ return series.astype("integer") def _to_string(self, series): """ Converts a series values to strings """ return series.astype("str") def _to_boolean(self, series): """ Converts a series values to bool """ return series.astype("bool") def hist(col_name, df, buckets, min_max=None, dtype=None): """ Create a columns expression to calculate a column histogram :param col_name: :param df: :param buckets: :param min_max: Min and max value necessary to calculate the buckets :param dtype: Column datatype to calculate the related histogram. Int, String and Dates return different histograms :return: """ PYSPARK_NUMERIC_TYPES = ["byte", "short", "big", "int", "double", "float"] def is_column_a(df, column, dtypes): """ Check if column match a list of data types :param df: dataframe :param column: column to be compared with :param dtypes: types to be checked :return: """ column = val_to_list(column) if len(column) > 1: RaiseIt.length_error(column, 1) data_type = tuple(val_to_list(parse_spark_dtypes(dtypes))) column = one_list_to_val(column) # Filter columns by data type return isinstance(df.schema[column].dataType, data_type) def create_exprs(_input_col, _buckets, _func): def count_exprs(_exprs): return F.sum(F.when(_exprs, 1).otherwise(0)) _exprs = [] for i, b in enumerate(_buckets): lower = b["lower"] upper = b["upper"] if is_numeric(lower): lower = round(lower, 2) if is_numeric(upper): upper = round(upper, 2) if len(_buckets) == 1: count = count_exprs( (_func(_input_col) == lower)) else: if i == len(_buckets): count = count_exprs( (_func(_input_col) > lower) & (_func(_input_col) <= upper)) else: count = count_exprs( (_func(_input_col) >= lower) & (_func(_input_col) < upper)) info = F.create_map(F.lit("count"), count.cast("int"), F.lit("lower"), F.lit(lower), F.lit("upper"), F.lit(upper)).alias( "hist_agg" + "_" + _input_col + "_" + str(b["bucket"])) _exprs.append(info) _exprs = F.array(*_exprs).alias("hist" + _input_col) return _exprs def hist_numeric(_min_max, _buckets): if _min_max is None: _min_max = df.agg(F.min(col_name).alias("min"), F.max(col_name).alias("max")).to_dict()[0] if _min_max["min"] is not None and _min_max["max"] is not None: _buckets = create_buckets(_min_max["min"], _min_max["max"], _buckets) _exprs = create_exprs(col_name, _buckets, F.col) else: _exprs = None return _exprs def hist_string(_buckets): _buckets = create_buckets(0, 50, _buckets) func = F.length return create_exprs(col_name, _buckets, func) def hist_date(): now = datetime.datetime.now() current_year = now.year oldest_year = 1950 # Year _buckets = create_buckets(oldest_year, current_year, current_year - oldest_year) func = F.year year = create_exprs(col_name, _buckets, func) # Month _buckets = 
create_buckets(1, 12, 11) func = F.month month = create_exprs(col_name, _buckets, func) # Day _buckets = create_buckets(1, 31, 31) func = F.dayofweek day = create_exprs(col_name, _buckets, func) # Hour _buckets = create_buckets(0, 23, 23) func = F.hour hour = create_exprs(col_name, _buckets, func) # Min _buckets = create_buckets(0, 60, 60) func = F.minute minutes = create_exprs(col_name, _buckets, func) # Second _buckets = create_buckets(0, 60, 60) func = F.second second = create_exprs(col_name, _buckets, func) exprs = F.create_map(F.lit("years"), year, F.lit("months"), month, F.lit("weekdays"), day, F.lit("hours"), hour, F.lit("minutes"), minutes, F.lit("seconds"), second) return exprs if dtype is not None: col_dtype = dtype[col_name]["dtype"] if col_dtype == "int" or col_dtype == "decimal": exprs = hist_numeric(min_max, buckets) elif col_dtype == "string": exprs = hist_string(buckets) elif col_dtype == "date": exprs = hist_date() else: exprs = None else: if is_column_a(df, col_name, PYSPARK_NUMERIC_TYPES): exprs = hist_numeric(min_max, buckets) elif is_column_a(df, col_name, "str"): exprs = hist_string(buckets) elif is_column_a(df, col_name, "date") or is_column_a(df, col_name, "timestamp"): exprs = hist_date() else: exprs = None return exprs def create_exprs(_input_col, _buckets, _func): def count_exprs(_exprs): return F.sum(F.when(_exprs, 1).otherwise(0)) _exprs = [] for i, b in enumerate(_buckets): lower = b["lower"] upper = b["upper"] if is_numeric(lower): lower = round(lower, 2) if is_numeric(upper): upper = round(upper, 2) if len(_buckets) == 1: count = count_exprs( (_func(_input_col) == lower)) else: if i == len(_buckets): count = count_exprs( (_func(_input_col) > lower) & (_func(_input_col) <= upper)) else: count = count_exprs( (_func(_input_col) >= lower) & (_func(_input_col) < upper)) info = F.create_map(F.lit("count"), count.cast("int"), F.lit("lower"), F.lit(lower), F.lit("upper"), F.lit(upper)).alias( "hist_agg" + "_" + _input_col + "_" + str(b["bucket"])) _exprs.append(info) _exprs = F.array(*_exprs).alias("hist" + _input_col) return _exprs @staticmethod def dask_to_compatible(dfd): from optimus.helpers.converter import dask_dataframe_to_pandas return ks.from_pandas(dask_dataframe_to_pandas(dfd)) @staticmethod def new_df(*args, **kwargs): return ks.from_pandas(
pd.DataFrame(*args, **kwargs)
pandas.DataFrame
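The new_df() helper forwards *args/**kwargs straight to the pandas.DataFrame constructor and then wraps the result for Spark. A brief sketch of two constructor forms that call would accept; the Koalas/Spark wrapping step is only noted in a comment because it needs a running Spark session:

import pandas as pd

pdf_a = pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})        # dict of columns
pdf_b = pd.DataFrame([[1, "a"], [2, "b"]], columns=["x", "y"])      # list of rows plus column names
print(pdf_a.shape, pdf_b.shape)

# In the class above the result is passed to ks.from_pandas(...) to obtain a
# Spark-backed frame; that conversion is omitted here.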
from collections import abc, deque from decimal import Decimal from io import StringIO from warnings import catch_warnings import numpy as np from numpy.random import randn import pytest from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series, concat, date_range, read_csv, ) import pandas._testing as tm from pandas.core.arrays import SparseArray from pandas.core.construction import create_series_with_explicit_dtype from pandas.tests.extension.decimal import to_decimal @pytest.fixture(params=[True, False]) def sort(request): """Boolean sort keyword for concat and DataFrame.append.""" return request.param class TestConcatenate: def test_concat_copy(self): df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1)) df3 = DataFrame({5: "foo"}, index=range(4)) # These are actual copies. result = concat([df, df2, df3], axis=1, copy=True) for b in result._mgr.blocks: assert b.values.base is None # These are the same. result = concat([df, df2, df3], axis=1, copy=False) for b in result._mgr.blocks: if b.is_float: assert b.values.base is df._mgr.blocks[0].values.base elif b.is_integer: assert b.values.base is df2._mgr.blocks[0].values.base elif b.is_object: assert b.values.base is not None # Float block was consolidated. df4 = DataFrame(np.random.randn(4, 1)) result = concat([df, df2, df3, df4], axis=1, copy=False) for b in result._mgr.blocks: if b.is_float: assert b.values.base is None elif b.is_integer: assert b.values.base is df2._mgr.blocks[0].values.base elif b.is_object: assert b.values.base is not None def test_concat_with_group_keys(self): df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randn(4, 4)) # axis=0 df = DataFrame(np.random.randn(3, 4)) df2 = DataFrame(np.random.randn(4, 4)) result = concat([df, df2], keys=[0, 1]) exp_index = MultiIndex.from_arrays( [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]] ) expected = DataFrame(np.r_[df.values, df2.values], index=exp_index) tm.assert_frame_equal(result, expected) result = concat([df, df], keys=[0, 1]) exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]) expected = DataFrame(np.r_[df.values, df.values], index=exp_index2) tm.assert_frame_equal(result, expected) # axis=1 df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randn(4, 4)) result = concat([df, df2], keys=[0, 1], axis=1) expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index) tm.assert_frame_equal(result, expected) result = concat([df, df], keys=[0, 1], axis=1) expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2) tm.assert_frame_equal(result, expected) def test_concat_keys_specific_levels(self): df = DataFrame(np.random.randn(10, 4)) pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]] level = ["three", "two", "one", "zero"] result = concat( pieces, axis=1, keys=["one", "two", "three"], levels=[level], names=["group_key"], ) tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key")) tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3])) assert result.columns.names == ["group_key", None] def test_concat_dataframe_keys_bug(self, sort): t1 = DataFrame( {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))} ) t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))}) # it works result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort) assert list(result.columns) == [("t1", 
"value"), ("t2", "value")] def test_concat_series_partial_columns_names(self): # GH10698 foo = Series([1, 2], name="foo") bar = Series([1, 2]) baz = Series([4, 5]) result = concat([foo, bar, baz], axis=1) expected = DataFrame( {"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1] ) tm.assert_frame_equal(result, expected) result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"]) expected = DataFrame( {"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]}, columns=["red", "blue", "yellow"], ) tm.assert_frame_equal(result, expected) result = concat([foo, bar, baz], axis=1, ignore_index=True) expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]}) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("mapping", ["mapping", "dict"]) def test_concat_mapping(self, mapping, non_dict_mapping_subclass): constructor = dict if mapping == "dict" else non_dict_mapping_subclass frames = constructor( { "foo": DataFrame(np.random.randn(4, 3)), "bar": DataFrame(np.random.randn(4, 3)), "baz": DataFrame(np.random.randn(4, 3)), "qux": DataFrame(np.random.randn(4, 3)), } ) sorted_keys = list(frames.keys()) result = concat(frames) expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys) tm.assert_frame_equal(result, expected) result = concat(frames, axis=1) expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1) tm.assert_frame_equal(result, expected) keys = ["baz", "foo", "bar"] result = concat(frames, keys=keys) expected = concat([frames[k] for k in keys], keys=keys) tm.assert_frame_equal(result, expected) def test_concat_ignore_index(self, sort): frame1 = DataFrame( {"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]} ) frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]}) frame1.index = Index(["x", "y", "z"]) frame2.index = Index(["x", "y", "q"]) v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort) nan = np.nan expected = DataFrame( [ [nan, nan, nan, 4.3], ["a", 1, 4.5, 5.2], ["b", 2, 3.2, 2.2], ["c", 3, 1.2, nan], ], index=Index(["q", "x", "y", "z"]), ) if not sort: expected = expected.loc[["x", "y", "z", "q"]] tm.assert_frame_equal(v1, expected) @pytest.mark.parametrize( "name_in1,name_in2,name_in3,name_out", [ ("idx", "idx", "idx", "idx"), ("idx", "idx", None, None), ("idx", None, None, None), ("idx1", "idx2", None, None), ("idx1", "idx1", "idx2", None), ("idx1", "idx2", "idx3", None), (None, None, None, None), ], ) def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out): # GH13475 indices = [ Index(["a", "b", "c"], name=name_in1), Index(["b", "c", "d"], name=name_in2), Index(["c", "d", "e"], name=name_in3), ] frames = [ DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"]) ] result = pd.concat(frames, axis=1) exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out) expected = DataFrame( { "x": [0, 1, 2, np.nan, np.nan], "y": [np.nan, 0, 1, 2, np.nan], "z": [np.nan, np.nan, 0, 1, 2], }, index=exp_ind, ) tm.assert_frame_equal(result, expected) def test_concat_multiindex_with_keys(self): index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=["first", "second"], ) frame = DataFrame( np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp"), ) result = concat([frame, frame], keys=[0, 1], names=["iteration"]) assert result.index.names == ("iteration",) + index.names tm.assert_frame_equal(result.loc[0], frame) tm.assert_frame_equal(result.loc[1], 
frame) assert result.index.nlevels == 3 def test_concat_multiindex_with_none_in_index_names(self): # GH 15787 index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None]) df = DataFrame({"col": range(5)}, index=index, dtype=np.int32) result = concat([df, df], keys=[1, 2], names=["level2"]) index = pd.MultiIndex.from_product( [[1, 2], [1], range(5)], names=["level2", "level1", None] ) expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) result = concat([df, df[:2]], keys=[1, 2], names=["level2"]) level2 = [1] * 5 + [2] * 2 level1 = [1] * 7 no_name = list(range(5)) + list(range(2)) tuples = list(zip(level2, level1, no_name)) index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) expected = DataFrame({"col": no_name}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) def test_concat_keys_and_levels(self): df = DataFrame(np.random.randn(1, 3)) df2 = DataFrame(np.random.randn(1, 4)) levels = [["foo", "baz"], ["one", "two"]] names = ["first", "second"] result = concat( [df, df2, df, df2], keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], levels=levels, names=names, ) expected = concat([df, df2, df, df2]) exp_index = MultiIndex( levels=levels + [[0]], codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]], names=names + [None], ) expected.index = exp_index tm.assert_frame_equal(result, expected) # no names result = concat( [df, df2, df, df2], keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], levels=levels, ) assert result.index.names == (None,) * 3 # no levels result = concat( [df, df2, df, df2], keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], names=["first", "second"], ) assert result.index.names == ("first", "second", None) tm.assert_index_equal( result.index.levels[0], Index(["baz", "foo"], name="first") ) def test_concat_keys_levels_no_overlap(self): # GH #1406 df = DataFrame(np.random.randn(1, 3), index=["a"]) df2 = DataFrame(np.random.randn(1, 4), index=["b"]) msg = "Values not found in passed level" with pytest.raises(ValueError, match=msg): concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]]) msg = "Key one not in level" with pytest.raises(ValueError, match=msg): concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]]) def test_concat_rename_index(self): a = DataFrame( np.random.rand(3, 3), columns=list("ABC"), index=Index(list("abc"), name="index_a"), ) b = DataFrame( np.random.rand(3, 3), columns=list("ABC"), index=Index(list("abc"), name="index_b"), ) result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"]) exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"]) names = list(exp.index.names) names[1] = "lvl1" exp.index.set_names(names, inplace=True) tm.assert_frame_equal(result, exp) assert result.index.names == exp.index.names def test_crossed_dtypes_weird_corner(self): columns = ["A", "B", "C", "D"] df1 = DataFrame( { "A": np.array([1, 2, 3, 4], dtype="f8"), "B": np.array([1, 2, 3, 4], dtype="i8"), "C": np.array([1, 2, 3, 4], dtype="f8"), "D": np.array([1, 2, 3, 4], dtype="i8"), }, columns=columns, ) df2 = DataFrame( { "A": np.array([1, 2, 3, 4], dtype="i8"), "B": np.array([1, 2, 3, 4], dtype="f8"), "C": np.array([1, 2, 3, 4], dtype="i8"), "D": np.array([1, 2, 3, 4], dtype="f8"), }, columns=columns, ) appended = df1.append(df2, ignore_index=True) expected = DataFrame( np.concatenate([df1.values, df2.values], axis=0), columns=columns ) 
tm.assert_frame_equal(appended, expected) df = DataFrame(np.random.randn(1, 3), index=["a"]) df2 = DataFrame(np.random.randn(1, 4), index=["b"]) result = concat([df, df2], keys=["one", "two"], names=["first", "second"]) assert result.index.names == ("first", "second") def test_dups_index(self): # GH 4771 # single dtypes df = DataFrame( np.random.randint(0, 10, size=40).reshape(10, 4), columns=["A", "A", "C", "C"], ) result = concat([df, df], axis=1) tm.assert_frame_equal(result.iloc[:, :4], df) tm.assert_frame_equal(result.iloc[:, 4:], df) result = concat([df, df], axis=0) tm.assert_frame_equal(result.iloc[:10], df) tm.assert_frame_equal(result.iloc[10:], df) # multi dtypes df = concat( [ DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]), DataFrame( np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"] ), ], axis=1, ) result = concat([df, df], axis=1) tm.assert_frame_equal(result.iloc[:, :6], df) tm.assert_frame_equal(result.iloc[:, 6:], df) result = concat([df, df], axis=0) tm.assert_frame_equal(result.iloc[:10], df) tm.assert_frame_equal(result.iloc[10:], df) # append result = df.iloc[0:8, :].append(df.iloc[8:]) tm.assert_frame_equal(result, df) result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10]) tm.assert_frame_equal(result, df) expected = concat([df, df], axis=0) result = df.append(df) tm.assert_frame_equal(result, expected) def test_with_mixed_tuples(self, sort): # 10697 # columns have mixed tuples, so handle properly df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2)) df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2)) # it works concat([df1, df2], sort=sort) def test_handle_empty_objects(self, sort): df = DataFrame(np.random.randn(10, 4), columns=list("abcd")) baz = df[:5].copy() baz["foo"] = "bar" empty = df[5:5] frames = [baz, empty, empty, df[5:]] concatted = concat(frames, axis=0, sort=sort) expected = df.reindex(columns=["a", "b", "c", "d", "foo"]) expected["foo"] = expected["foo"].astype("O") expected.loc[0:4, "foo"] = "bar" tm.assert_frame_equal(concatted, expected) # empty as first element with time series # GH3259 df = DataFrame( dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s") ) empty = DataFrame() result = concat([df, empty], axis=1) tm.assert_frame_equal(result, df) result = concat([empty, df], axis=1) tm.assert_frame_equal(result, df) result = concat([df, empty]) tm.assert_frame_equal(result, df) result = concat([empty, df]) tm.assert_frame_equal(result, df) def test_concat_mixed_objs(self): # concat mixed series/frames # G2385 # axis 1 index = date_range("01-Jan-2013", periods=10, freq="H") arr = np.arange(10, dtype="int64") s1 = Series(arr, index=index) s2 = Series(arr, index=index) df = DataFrame(arr.reshape(-1, 1), index=index) expected = DataFrame( np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0] ) result = concat([df, df], axis=1) tm.assert_frame_equal(result, expected) expected = DataFrame( np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1] ) result = concat([s1, s2], axis=1) tm.assert_frame_equal(result, expected) expected = DataFrame( np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2] ) result = concat([s1, s2, s1], axis=1) tm.assert_frame_equal(result, expected) expected = DataFrame( np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3] ) result = concat([s1, df, s2, s2, s1], axis=1) tm.assert_frame_equal(result, expected) # with names s1.name = "foo" expected = DataFrame( np.repeat(arr, 3).reshape(-1, 3), 
index=index, columns=["foo", 0, 0] ) result = concat([s1, df, s2], axis=1) tm.assert_frame_equal(result, expected) s2.name = "bar" expected = DataFrame( np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"] ) result = concat([s1, df, s2], axis=1) tm.assert_frame_equal(result, expected) # ignore index expected = DataFrame( np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2] ) result = concat([s1, df, s2], axis=1, ignore_index=True) tm.assert_frame_equal(result, expected) # axis 0 expected = DataFrame( np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0] ) result = concat([s1, df, s2]) tm.assert_frame_equal(result, expected) expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0]) result = concat([s1, df, s2], ignore_index=True) tm.assert_frame_equal(result, expected) def test_empty_dtype_coerce(self): # xref to #12411 # xref to #12045 # xref to #11594 # see below # 10571 df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"]) df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"]) result = concat([df1, df2]) expected = df1.dtypes tm.assert_series_equal(result.dtypes, expected) def test_dtype_coerceion(self): # 12411 df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]}) result = concat([df.iloc[[0]], df.iloc[[1]]]) tm.assert_series_equal(result.dtypes, df.dtypes) # 12045 import datetime df = DataFrame( {"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]} ) result = concat([df.iloc[[0]], df.iloc[[1]]]) tm.assert_series_equal(result.dtypes, df.dtypes) # 11594 df = DataFrame({"text": ["some words"] + [None] * 9}) result = concat([df.iloc[[0]], df.iloc[[1]]]) tm.assert_series_equal(result.dtypes, df.dtypes) def test_concat_series(self): ts = tm.makeTimeSeries() ts.name = "foo" pieces = [ts[:5], ts[5:15], ts[15:]] result = concat(pieces) tm.assert_series_equal(result, ts) assert result.name == ts.name result = concat(pieces, keys=[0, 1, 2]) expected = ts.copy() ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]")) exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))] exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes) expected.index = exp_index tm.assert_series_equal(result, expected) def test_concat_series_axis1(self, sort=sort): ts = tm.makeTimeSeries() pieces = [ts[:-2], ts[2:], ts[2:-2]] result = concat(pieces, axis=1) expected = DataFrame(pieces).T tm.assert_frame_equal(result, expected) result = concat(pieces, keys=["A", "B", "C"], axis=1) expected = DataFrame(pieces, index=["A", "B", "C"]).T tm.assert_frame_equal(result, expected) # preserve series names, #2489 s = Series(randn(5), name="A") s2 = Series(randn(5), name="B") result = concat([s, s2], axis=1) expected = DataFrame({"A": s, "B": s2}) tm.assert_frame_equal(result, expected) s2.name = None result = concat([s, s2], axis=1) tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object")) # must reindex, #2603 s = Series(randn(3), index=["c", "a", "b"], name="A") s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B") result = concat([s, s2], axis=1, sort=sort) expected = DataFrame({"A": s, "B": s2}) tm.assert_frame_equal(result, expected) def test_concat_series_axis1_names_applied(self): # ensure names argument is not ignored on axis=1, #23490 s = Series([1, 2, 3]) s2 = Series([4, 5, 6]) result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"]) expected = DataFrame( [[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A") ) 
tm.assert_frame_equal(result, expected) result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"]) expected = DataFrame( [[1, 4], [2, 5], [3, 6]], columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]), ) tm.assert_frame_equal(result, expected) def test_concat_single_with_key(self): df = DataFrame(np.random.randn(10, 4)) result =
concat([df], keys=["foo"])
pandas.concat
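The completion concatenates a single frame with keys=["foo"], which prepends the key as an extra outer index level. A minimal sketch of that effect on a small illustrative frame:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["a", "b"])
result = pd.concat([df], keys=["foo"])   # single frame + keys -> one extra outer index level
print(result.index)                      # MultiIndex with 'foo' on the outer level
assert result.index.nlevels == 2
print(result.loc["foo"])                 # selecting the key recovers the original rows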
import os import numpy as np import matplotlib.pyplot as plt import pandas as pd from qod_fvm.np_to_xmf import NpArray2Xmf from qod_fvm import utils utils.plt_style() CHECK_TIME_STEP = 0 TEST_WRITE_FUNCTION = 0 SHOW = 0 class Field: def __init__(self, list_of_cell): self.lst_cell = list_of_cell self.lst_BC = [] self.residuals_adv = np.zeros_like(self.get_flux_cc_matrix()) self.residuals_diff = np.zeros_like(self.residuals_adv) self.residuals_src_terms = np.zeros_like(self.residuals_adv) def get_xi(self): out = [] for _cell in self.lst_cell: out.append(_cell.x_i) return np.array(out) def get_diam(self): out = [] for _cell in self.lst_cell: out.append(_cell.diam) return np.array(out) def get_area(self): out = [] for _cell in self.lst_cell: out.append(_cell.area) return np.array(out) def get_T(self): out = [] for _cell in self.lst_cell: out.append(_cell.get_T()) return np.array(out) def get_P(self): out = [] for _cell in self.lst_cell: out.append(_cell.get_P()) return np.array(out) def get_rho(self): out = [] for _cell in self.lst_cell: out.append(_cell.rho) return np.array(out) def get_u(self): out = [] for _cell in self.lst_cell: out.append(_cell.get_u()) return np.array(out) def get_rho_u(self): out = [] for _cell in self.lst_cell: out.append(_cell.get_rhou()) return np.array(out) def get_rho_e(self): out = [] for _cell in self.lst_cell: out.append(_cell.rho_E) return np.array(out) def get_r_gas(self): out = [] for _cell in self.lst_cell: out.append(_cell.r_gas) return np.array(out) def get_gamma(self): out = [] for _cell in self.lst_cell: out.append(_cell.gamma) return np.array(out) def get_dx(self): out = [] for _cell in self.lst_cell: out.append(_cell.dx) return np.array(out) def get_sos(self): """ Get speed of sound :return: sos """ out = [] for _cell in self.lst_cell: out.append(_cell.get_sos()) return np.array(out) def get_mach(self): return self.get_u() / self.get_sos() def add_source_term_p(self): x_pos = self.get_xi() pres = self.get_P() grad_p = np.gradient(pres, x_pos) for _idx_cell, _cell in enumerate(self.lst_cell): _cell.s_cons[_cell.idx_momentum] = _cell.area * grad_p[_idx_cell] def add_source_term_energy(self): raise NotImplementedError("This source term should not be used. 
the source terms are in the fluxes") x_pos = self.get_xi() pres = self.get_P() area = self.get_area() u_vel = self.get_u() grad_p_energy = np.gradient(pres * area * u_vel, x_pos) for _idx_cell, _cell in enumerate(self.lst_cell): _cell.s_cons[_cell.idx_energy] = grad_p_energy[_idx_cell] def compute_time_step(self, cfl, step): u_arr = self.get_u() sos = self.get_sos() delta_x = self.get_dx() local_dt = cfl * delta_x / np.maximum(np.abs(u_arr + sos), np.abs(u_arr - sos)) if CHECK_TIME_STEP: fig, axes = plt.subplots(2, 2, sharex=True, figsize=(6, 4.5)) plt.suptitle('Global dt = %e' % np.amin(local_dt)) axes[0, 0].plot(self.get_xi(), delta_x) axes[0, 0].set_ylabel(r"$\Delta x$ [m]") axes[0, 1].plot(self.get_xi(), sos) axes[0, 1].set_ylabel(r"Speed of sound [m/s]") axes[1, 0].plot(self.get_xi(), u_arr) axes[1, 0].set_ylabel(r"Velocity [m/s]") axes[1, 1].plot(self.get_xi(), local_dt) axes[1, 1].set_ylabel(r"local $\Delta t$ [m/s]") for idx in range(2): axes[1, idx].set_xlabel("x [m]") if not step: utils.savefig_check('local_dt') else: utils.savefig_check('local_dt_%6d' % step) if SHOW: plt.show() plt.close() dt_min = np.amin(local_dt) # Check for NaN assert (dt_min == dt_min) return dt_min def update_vec_from_var(self): for _cell in self.lst_cell: _cell.update_vec_from_var() def update_var_from_vec(self): for _cell in self.lst_cell: _cell.update_var_from_vec() def prim_to_cons(self): for _cell in self.lst_cell: _cell.prim_to_cons() def cons_to_prim(self): for _cell in self.lst_cell: _cell.cons_to_prim() def get_cons_matrix(self): n_cells = len(self.lst_cell) n_cons = self.lst_cell[0].n_transport_eq w_cons_mat = np.zeros((n_cells, n_cons)) for _idx, _cell in enumerate(self.lst_cell): w_cons_mat[_idx, :] = _cell.w_cons return w_cons_mat def get_flux_cc_matrix(self): """cell centered fluxes""" n_cells = len(self.lst_cell) n_cons = self.lst_cell[0].n_transport_eq f_cons_mat = np.zeros((n_cells, n_cons)) for _idx, _cell in enumerate(self.lst_cell): f_cons_mat[_idx, :] = _cell.f_cons return f_cons_mat def get_source_terms_matrix(self): """cell centered fluxes""" n_cells = len(self.lst_cell) n_cons = self.lst_cell[0].n_transport_eq s_cons_mat = np.zeros((n_cells, n_cons)) for _idx, _cell in enumerate(self.lst_cell): s_cons_mat[_idx, :] = _cell.s_cons return s_cons_mat def write_output(self, iteration, time, params_IO): self.update_var_from_vec() dict_out = {} dict_out['x'] = self.get_xi() dict_out['diam'] = self.get_diam() dict_out['area'] = self.get_area() dict_out['P'] = self.get_P() dict_out['T'] = self.get_T() dict_out['rho'] = self.get_rho() dict_out['u'] = self.get_u() dict_out['rhou'] = self.get_rho_u() dict_out['rhoe'] = self.get_rho_e() dict_out['r_gas'] = self.get_r_gas() dict_out['gamma'] = self.get_gamma() dict_out['mach'] = self.get_mach() dict_out['sos'] = self.get_sos() dict_out['iteration'] = iteration * np.ones(len(self.lst_cell)) dict_out['time'] = time * np.ones(len(self.lst_cell)) path = params_IO['directory'] sol_name = os.path.join(path, "solution_%08d" % iteration) print("\t--> Saving %s.csv" % sol_name) df =
pd.DataFrame.from_dict(dict_out)
pandas.DataFrame.from_dict
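Here the solver assembles its per-cell output arrays into a frame with pandas.DataFrame.from_dict before saving. A small sketch with made-up field names and equal-length 1-D arrays standing in for dict_out:

import numpy as np
import pandas as pd

dict_out = {
    "x":    np.linspace(0.0, 1.0, 4),
    "P":    np.full(4, 101325.0),
    "mach": np.array([0.2, 0.3, 0.5, 0.8]),
}
df = pd.DataFrame.from_dict(dict_out)   # keys become columns, array entries become rows
print(df)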
# coding: utf-8 import numpy as np import pandas as pd import scipy.misc as spm import encode_decode, parse_vi import get_substring_representativeness as rep import argparse, os.path def to_int_code(string, encoder): try: out = int(string) except: out = encoder[string] return out def reformat_symbols(symbol): return symbol.replace('r',u'ɾ').replace(u'ä','a').replace('9',u'ɰ̃') if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('likelihood_csv', type=str, help='Path to the csv file containing likelihood info.') parser.add_argument('result_dir', type=str, help='Path to the directory containing learning results.') parser.add_argument('frequency_csv', type=str, help='Path to the csv file containing frequency info.') parser.add_argument('string_length', type=int, help='Length of substrings to rank.') args = parser.parse_args() df_code = pd.read_csv(os.path.join(args.result_dir, 'symbol_coding.csv'), encoding='utf-8') encoder,decoder = encode_decode.df2coder(df_code) df_like = pd.read_csv(args.likelihood_csv, encoding='utf-8') string_cols = sorted([col for col in df_like.columns.tolist() if col.startswith('symbol_')])[-args.string_length:] df_like = df_like.groupby(string_cols+['sublex']).sum().reset_index() df_like['log_like'] = df_like.prob.map(np.ma.log) df_freq = pd.read_csv(args.frequency_csv, encoding='utf-8') df_freq = df_freq.rename(columns={'value':string_cols[-1],'sublex_id':'sublex'}) df_freq[string_cols[-1]] = df_freq[string_cols[-1]].map(lambda s: to_int_code(s, encoder)) if args.string_length > 1: df_freq = pd.concat([ df_freq, df_freq.context.str.split('_', expand=True).rename(columns={ix:col for ix,col in enumerate(string_cols[:-1])}).applymap(lambda s: to_int_code(s, encoder)) ], axis=1) df_like =
pd.merge(df_like, df_freq, on=string_cols+['sublex'])
pandas.merge
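The completion joins the likelihood table to the frequency table on the symbol columns plus 'sublex'. A minimal sketch of a multi-key merge with illustrative column names and values; the default how='inner' keeps only key combinations present in both frames:

import pandas as pd

df_like = pd.DataFrame({"symbol_0": [1, 1, 2], "sublex": [0, 1, 0], "log_like": [-0.1, -0.4, -0.9]})
df_freq = pd.DataFrame({"symbol_0": [1, 2, 3], "sublex": [0, 0, 1], "frequency": [10, 3, 7]})

merged = pd.merge(df_like, df_freq, on=["symbol_0", "sublex"])  # inner join on both keys
print(merged)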
# -*- coding: utf-8 -*- from unittest import TestCase from parameterized import parameterized import pandas as pd import numpy as np from numpy.testing.utils import assert_array_equal from pandas import (MultiIndex, Index) from pandas.util.testing import assert_frame_equal, assert_series_equal from alphaware.enums import OutputDataFormat, FreqType from alphaware.const import INDEX_FACTOR from alphaware.utils import (convert_df_format, top, group_by_freq, fwd_return, weighted_rank) from datetime import datetime as dt class TestPandasUtils(TestCase): @parameterized.expand([(pd.DataFrame({'001': [1, 2, 3], '002': [2, 3, 4]}, index=['2014', '2015', '2016']), OutputDataFormat.MULTI_INDEX_DF, 'test_factor', INDEX_FACTOR, pd.DataFrame(index=MultiIndex(levels=[['2014', '2015', '2016'], ['001', '002']], labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['trade_date', 'ticker']), data=[1, 2, 2, 3, 3, 4], columns=['test_factor']))]) def test_convert_df_format_1(self, data, target_format, col_name, multi_index, expected): calculated = convert_df_format(data, target_format, col_name, multi_index) assert_frame_equal(calculated, expected) @parameterized.expand( [(pd.DataFrame( index=MultiIndex.from_product([['2014', '2015', '2016'], ['001', '002']], names=['trade_date', 'ticker']), data=[1, 2, 3, 4, 5, 6], columns=['factor']), OutputDataFormat.PITVOT_TABLE_DF, 'factor', INDEX_FACTOR, pd.DataFrame({'001': [1, 3, 5], '002': [2, 4, 6]}, index=Index(['2014', '2015', '2016'], name='trade_date')))]) def test_convert_df_format_2(self, data, target_format, col_name, multi_index, expected): calculated = convert_df_format(data, target_format, col_name, multi_index) assert_frame_equal(calculated, expected) @parameterized.expand( [(pd.DataFrame(data=[[1, 23, 4, 5], [4, 5, 7, 8], [10, 5, 11, 8], [34, 65, 27, 78]], columns=['A', 'B', 'C', 'D']), 2, ['A'], pd.DataFrame(data=[[34, 65, 27, 78], [10, 5, 11, 8]], index=[3, 2], columns=['A', 'B', 'C', 'D']) )]) def test_top_1(self, data, n, column, expected): calculated = top(data, column=column, n=n) assert_frame_equal(calculated, expected) @parameterized.expand( [(pd.Series(data=[35, 12, 45, 79, 123, 74, 35]), 3, pd.Series(data=[123, 79, 74], index=[4, 3, 5]) )]) def test_top_2(self, data, n, expected): calculated = top(data, n=n)
assert_series_equal(calculated, expected)
pandas.util.testing.assert_series_equal
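The target function is addressed here through pandas.util.testing, a namespace that was later deprecated in favour of the public pandas.testing module. A short sketch using the public import path; the series values mirror the expected output of the top() test above:

import pandas as pd
import pandas.testing as tm   # public home of the assert_* helpers

calculated = pd.Series([123, 79, 74], index=[4, 3, 5])
expected = pd.Series([123, 79, 74], index=[4, 3, 5])
tm.assert_series_equal(calculated, expected)   # passes silently when values, index and dtype match

# A mismatch raises AssertionError with a diff, e.g. a dtype change:
# tm.assert_series_equal(calculated, expected.astype(float))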
from requests import Session from bs4 import BeautifulSoup import pandas as pd HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '\ 'AppleWebKit/537.36 (KHTML, like Gecko) '\ 'Chrome/75.0.3770.80 Safari/537.36'} def zacks_extract(ratio_name, period='weekly_'): """ Function to extract Ratios from Zacks """ # Create Empty list list_to_append = [] # Read list of stocks and get all symbols stocks = pd.read_csv('../docs/my_stocks.csv') list_of_stocks = stocks['symbol'] # Start loop by creating empty list and calculate lenght, so we can track completion lenght = len(list_of_stocks) # Create Session s = Session() # Add headers s.headers.update(HEADERS) # JSON Key Field json_field = period + ratio_name # For every single stock, do the following for idx, stock in enumerate(list_of_stocks): # Print Progress print((idx+1)/lenght) # Create URL url = f'https://widget3.zacks.com/data/chart/json/{stock}/' + ratio_name + '/www.zacks.com' # Request and transform response in json screener = s.get(url) json = screener.json() # Check for error if len(json) > 1: try: # Append results into list [list_to_append.append([i[0], i[1], stock]) for idx, i in enumerate(json[json_field].items()) if idx < 300] except (KeyError, AttributeError) as e: continue # Create dataframe with results df = pd.DataFrame(list_to_append) df.columns = ['timestamp', ratio_name, 'symbol'] # Export df['timestamp'] = pd.to_datetime(df['timestamp']) filepath = '../docs/' + ratio_name + '.csv' df.to_csv(filepath, index=0) return df def merge_ratio(df, all_prices, ratio_name): """ Function to merge ratio with all prices """ # Metric Dictionary dic = {'pe_ratio': 'eps_ttm', 'price_to_book_value': 'book_value_ttm'} # Field name field_name = dic[ratio_name] # Rename columns, convert column to datetime and keep only records where date > 2017-01-01 df.columns = ['timestamp_merge', ratio_name, 'symbol'] df['timestamp_merge'] = pd.to_datetime(df['timestamp_merge']) df = df[df['timestamp_merge'] > '2014-01-01'] # Convert all prices column to datetime all_prices['just_date_merge'] = pd.to_datetime(all_prices['just_date']) # Merge both dataframes merge_df = pd.merge(all_prices, df, left_on = ['just_date_merge', 'symbol'], right_on = ['timestamp_merge', 'symbol'], how='left') # Calculate EPS TTM based on weekly PE Ratios merge_df[field_name] = merge_df['close_price'] / merge_df[ratio_name] merge_df[field_name] = merge_df[field_name].round(3) # Since we have only Weekly Value we can Forward/Backward Fill the EPS TTM merge_df[field_name] = merge_df.groupby('symbol').ffill()[field_name] merge_df[field_name] = merge_df.groupby('symbol').bfill()[field_name] # Calculate PE Ratio with EPS TTM and round numbers merge_df[ratio_name] = merge_df['close_price'] / merge_df[field_name] merge_df[ratio_name] = merge_df[ratio_name].round(3) # Drop columns merge_df.drop(['just_date_merge', 'timestamp_merge'], inplace=True, axis=1) # Export merge_df.to_csv('../docs/' + field_name + '.csv', index=0) return merge_df def pe_analysis(prices_pe): # Calculate Average Market Cap by Industry/Sector overtime daily_pe_ratio_mean = prices_pe[['just_date', 'industry', 'sector', 'pe_ratio']].groupby(['just_date', 'industry', 'sector']).mean()['pe_ratio'] daily_pe_ratio_std = prices_pe[['just_date', 'industry', 'sector', 'pe_ratio']].groupby(['just_date', 'industry', 'sector']).std()['pe_ratio'] # Convert to Data Frame daily_pe_ratio_mean = daily_pe_ratio_mean.reset_index() daily_pe_ratio_std = daily_pe_ratio_std.reset_index() # Rename Columns daily_pe_ratio_mean.columns = 
['just_date', 'industry', 'sector', 'avg_pe_ratio'] daily_pe_ratio_std.columns = ['just_date', 'industry', 'sector', 'std_pe_ratio'] # Merge with main daily_pe_stats =
pd.merge(daily_pe_ratio_mean, daily_pe_ratio_std, on=['just_date', 'industry', 'sector'])
pandas.merge
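Here pandas.merge stitches the two groupby aggregates (mean and std of the PE ratio) back into one table keyed by date, industry and sector. A compact sketch with a toy prices_pe frame; note the std of a single-row group comes out as NaN:

import pandas as pd

prices_pe = pd.DataFrame({
    "just_date": ["2021-01-04", "2021-01-04", "2021-01-04"],
    "industry":  ["Utilities", "Utilities", "Banks"],
    "sector":    ["Utilities", "Utilities", "Financials"],
    "pe_ratio":  [18.0, 22.0, 11.5],
})
keys = ["just_date", "industry", "sector"]
mean_df = prices_pe.groupby(keys)["pe_ratio"].mean().reset_index(name="avg_pe_ratio")
std_df = prices_pe.groupby(keys)["pe_ratio"].std().reset_index(name="std_pe_ratio")

daily_pe_stats = pd.merge(mean_df, std_df, on=keys)   # one row per (date, industry, sector)
print(daily_pe_stats)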
import json import pandas as pd import requests from . import df_to_tempfile class Scriptor: def __init__(self, **kwargs): self.__dict__.update(kwargs) def ingest(self) -> pd.DataFrame: url = "https://www.nycgovparks.org/bigapps/DPR_CapitalProjectTracker_001.json" data = json.loads(requests.get(url).content) df = pd.DataFrame(data) df = df[["TrackerID", "FMSID", "Title", "TotalFunding", "Locations"]] df["Locations"] = df["Locations"].apply(lambda x: x.get("Location")) df2 = df.drop(columns=["Locations"]).join(df["Locations"].explode().to_frame()) horiz_exploded = pd.json_normalize(df2["Locations"]) horiz_exploded.index = df2.index df3 =
pd.concat([df2, horiz_exploded], axis=1)
pandas.concat
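The completion glues the json_normalize output back onto the original frame column-wise; axis=1 concatenation aligns on the shared index. A minimal sketch with illustrative location columns (the real normalized column names depend on the API payload):

import pandas as pd

df2 = pd.DataFrame({"TrackerID": [101, 102], "Title": ["Park A", "Park B"]}, index=[0, 1])
horiz_exploded = pd.DataFrame({"Latitude": [40.7, 40.8], "Longitude": [-73.9, -73.8]}, index=[0, 1])

# axis=1 places rows with matching index labels side by side.
df3 = pd.concat([df2, horiz_exploded], axis=1)
print(df3.columns.tolist())   # ['TrackerID', 'Title', 'Latitude', 'Longitude']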
""" The DataOracle class reads real historical trade data (not price or quote) from a given date in history to be resimulated. It stores these trades in a time-sorted array at maximum resolution. It can be called by certain "background" agents to obtain noisy observations about the "real" price of a stock at a current time. It is intended to provide some realistic behavior and "price gravity" to the simulated market -- i.e. to make the market behave something like historical reality in the absence of whatever experiment we are running with more active agent types. """ import datetime as dt import os from bisect import bisect_left from math import sqrt, exp from typing import List import numpy as np import pandas as pd from joblib import Memory from backtesting.oracle.base import Oracle from backtesting.typing import FileName from backtesting.utils.util import log_print mem = Memory(cachedir='./cache', verbose=0) __all__ = ( "DataOracle", "ExternalFileOracle", "MeanRevertingOracle", "SparseMeanRevertingOracle" ) # @mem.cache def read_trades(trade_file: FileName, symbols: List[str]) -> pd.DataFrame: log_print("Data not cached. This will take a minute...") df = pd.read_pickle(trade_file, compression='bz2') # Filter to requested symbols. df = df.loc[symbols] # Filter duplicate indices (trades on two exchanges at the PRECISE same time). Rare. df = df[~df.index.duplicated(keep='first')] # Ensure resulting index is sorted for best performance later on. df = df.sort_index() return df class DataOracle(Oracle): def __init__(self, historical_date=None, symbols: List[str] = None, data_dir=None): self.historical_date = historical_date self.symbols = symbols self.mkt_open = None # Read historical trades here... h = historical_date pre = 'ct' if h.year < 2015 else 'ctm' trade_file = os.path.join(data_dir, 'trades', f'trades_{h.year}', f'{pre}_{h.year}{h.month:02d}{h.day:02d}.bgz') bars_1m_file = os.path.join(data_dir, '1m_ohlc', f'1m_ohlc_{h.year}', f'{h.year}{h.month:02d}{h.day:02d}_ohlc_1m.bgz') log_print("DataOracle initializing trades from file {}", trade_file) log_print("DataOracle initializing 1m bars from file {}", bars_1m_file) then = dt.datetime.now() self.df_trades = read_trades(trade_file, symbols) self.df_bars_1m = read_trades(bars_1m_file, symbols) now = dt.datetime.now() log_print("DataOracle initialized for {} with symbols {}", historical_date, symbols) log_print("DataOracle initialization took {}", now - then) # Return the daily open price for the symbol given. The processing to create the 1m OHLC # files does propagate the earliest trade backwards, which helps. The exchange should # pass its opening time. def getDailyOpenPrice(self, symbol, mkt_open, cents=True): # Remember market open time. self.mkt_open = mkt_open log_print("Oracle: client requested {} at market open: {}", symbol, mkt_open) # Find the opening historical price in the 1m OHLC bars for this symbol. open = self.df_bars_1m.loc[(symbol, mkt_open.time()), 'open'] log_print("Oracle: market open price was was {}", open) return round(open * 100) if cents else open # Return the latest trade price for the symbol at or prior to the given currentTime, # which must be of type pd.Timestamp. def getLatestTrade(self, symbol, currentTime): log_print("Oracle: client requested {} as of {}", symbol, currentTime) # See when the last historical trade was, prior to simulated currentTime. 
dt_last_trade = self.df_trades.loc[symbol].index.asof(currentTime) if pd.notnull(dt_last_trade): last_trade = self.df_trades.loc[(symbol, dt_last_trade)] price = last_trade['PRICE'] time = dt_last_trade # If we know the market open time, and the last historical trade was before it, use # the market open price instead. If there were no trades before the requested time, # also use the market open price. if pd.isnull(dt_last_trade) or (self.mkt_open and time < self.mkt_open): price = self.getDailyOpenPrice(symbol, self.mkt_open, cents=False) time = self.mkt_open log_print("Oracle: latest historical trade was {} at {}", price, time) return price # Return a noisy observed historical price for agents which have that ability. # currentTime must be of type pd.Timestamp. Only the Exchange or other privileged # agents should use noisy=False. # # NOTE: sigma_n is the observation variance, NOT STANDARD DEVIATION. # # Each agent must pass its own np.random.RandomState object to the oracle. # This helps to preserve the consistency of multiple simulations with experimental # changes (if the oracle used a global Random object, simply adding one new agent # would change everyone's "noise" on all subsequent observations). def observePrice(self, symbol, current_time, sigma_n=0.0001, random_state=None) -> int: last_trade_price = self.getLatestTrade(symbol, current_time) # Noisy belief is a normal distribution around 1% the last trade price with variance # as requested by the agent. if sigma_n == 0: belief = float(last_trade_price) else: belief = random_state.normal(loc=last_trade_price, scale=last_trade_price * sqrt(sigma_n)) log_print("Oracle: giving client value observation {:0.2f}", belief) # All simulator prices are specified in integer cents. return int(round(belief * 100)) class ExternalFileOracle(Oracle): """ Oracle using an external price series as the fundamental. The external series are specified files in the ABIDES config. If an agent requests the fundamental value in between two timestamps the returned fundamental value is linearly interpolated. """ __slots__ = ( "mkt_open", "symbols", "fundamentals", "f_log" ) def __init__(self, symbols): self.mkt_open = None self.symbols = symbols self.fundamentals = self.load_fundamentals() self.f_log = {symbol: [] for symbol in symbols} def load_fundamentals(self): """ Method extracts fundamentals for each symbol into DataFrames. Note that input files must be of the form generated by util/formatting/mid_price_from_orderbook.py. """ fundamentals = {} log_print("Oracle: loading fundamental price series...") for symbol, params_dict in self.symbols.items(): fundamental_file_path = params_dict['fundamental_file_path'] log_print("Oracle: loading {}", fundamental_file_path) fundamental_df = pd.read_pickle(fundamental_file_path) fundamentals.update({symbol: fundamental_df}) log_print("Oracle: loading fundamental price series complete!") return fundamentals def getDailyOpenPrice(self, symbol, mkt_open): # Remember market open time. self.mkt_open = mkt_open log_print("Oracle: client requested {} at market open: {}", symbol, mkt_open) # Find the opening historical price or this symbol. open_price = self.getPriceAtTime(symbol, mkt_open) log_print("Oracle: market open price was was {}", open_price) return int(round(open_price)) def getPriceAtTime(self, symbol, query_time): """ Get the true price of a symbol at the requested time. 
:param symbol: which symbol to query :type symbol: str :param time: at this time :type time: pd.Timestamp """ log_print("Oracle: client requested {} as of {}", symbol, query_time) fundamental_series = self.fundamentals[symbol] time_of_query = pd.Timestamp(query_time) series_open_time = fundamental_series.index[0] series_close_time = fundamental_series.index[-1] if time_of_query < series_open_time: # time queried before open return fundamental_series[0] elif time_of_query > series_close_time: # time queried after close return fundamental_series[-1] else: # time queried during trading # find indices either side of requested time lower_idx = bisect_left(fundamental_series.index, time_of_query) - 1 upper_idx = lower_idx + 1 if lower_idx < len(fundamental_series.index) - 1 else lower_idx # interpolate between values lower_val = fundamental_series[lower_idx] upper_val = fundamental_series[upper_idx] log_print( f"DEBUG: lower_idx: {lower_idx}, lower_val: {lower_val}, upper_idx: {upper_idx}, upper_val: {upper_val}") interpolated_price = self.getInterpolatedPrice(query_time, fundamental_series.index[lower_idx], fundamental_series.index[upper_idx], lower_val, upper_val) log_print("Oracle: latest historical trade was {} at {}. Next historical trade is {}. " "Interpolated price is {}", lower_val, query_time, upper_val, interpolated_price) self.f_log[symbol].append({'FundamentalTime': query_time, 'FundamentalValue': interpolated_price}) return interpolated_price def observePrice(self, symbol, current_time, sigma_n=0.0001, random_state=None): """ Make observation of price at a given time. :param symbol: symbol for which to observe price :type symbol: str :param current_time: time of observation :type current_time: pd.Timestamp :param sigma_n: Observation noise parameter :type sigma_n: float :param random_state: random state for Agent making observation :type random_state: np.RandomState :return: int, price in cents """ true_price = self.getPriceAtTime(symbol, current_time) if sigma_n == 0: observed = true_price else: observed = random_state.normal(loc=true_price, scale=sqrt(sigma_n)) return int(round(observed)) def getInterpolatedPrice(self, current_time, time_low, time_high, price_low, price_high): """ Get the price at current_time, linearly interpolated between price_low and price_high measured at times time_low and time_high :param current_time: time for which price is to be interpolated :type current_time: pd.Timestamp :param time_low: time of first fundamental value :type time_low: pd.Timestamp :param time_high: time of first fundamental value :type time_high: pd.Timestamp :param price_low: first fundamental value :type price_low: float :param price_high: first fundamental value :type price_high: float :return float of interpolated price: """ log_print( f'DEBUG: current_time: {current_time} time_low {time_low} time_high: {time_high} price_low: {price_low} price_high: {price_high}') delta_y = price_high - price_low delta_x = (time_high - time_low).total_seconds() slope = delta_y / delta_x if price_low != price_high else 0 x_fwd = (current_time - time_low).total_seconds() return price_low + (x_fwd * slope) class MeanRevertingOracle(Oracle): """ The MeanRevertingOracle requires three parameters: a mean fundamental value, a mean reversion coefficient, and a shock variance. It constructs and retains a fundamental value time series for each requested symbol, and provides noisy observations of those values upon agent request. 
The expectation is that agents using such an oracle will know the mean-reverting equation and all relevant parameters, but will not know the random shocks applied to the sequence at each time step. Historical dates are effectively meaningless to this oracle. It is driven by the numpy random number seed contained within the experimental config file. This oracle uses the nanoseconds portion of the current simulation time as discrete "time steps". A suggestion: to keep wallclock runtime reasonable, have the agents operate for only ~1000 nanoseconds, but interpret nanoseconds as seconds or minutes. """ __slots__ = ( "mkt_open", "mkt_close", "symbols", "r" ) def __init__(self, mkt_open, mkt_close, symbols): # Symbols must be a dictionary of dictionaries with outer keys as symbol names and # inner keys: r_bar, kappa, sigma_s. self.mkt_open = mkt_open self.mkt_close = mkt_close self.symbols = symbols # The dictionary r holds the fundamenal value series for each symbol. self.r = {} then = dt.datetime.now() for symbol in symbols: s = symbols[symbol] log_print("MeanRevertingOracle computing fundamental value series for {}", symbol) self.r[symbol] = self.generate_fundamental_value_series(symbol=symbol, **s) now = dt.datetime.now() log_print("MeanRevertingOracle initialized for symbols {}", symbols) log_print("MeanRevertingOracle initialization took {}", now - then) def generate_fundamental_value_series(self, symbol, r_bar, kappa, sigma_s): # Generates the fundamental value series for a single stock symbol. r_bar is the # mean fundamental value, kappa is the mean reversion coefficient, and sigma_s # is the shock variance. (Note: NOT STANDARD DEVIATION.) # Because the oracle uses the global np.random PRNG to create the fundamental value # series, it is important to create the oracle BEFORE the agents. In this way the # addition of a new agent will not affect the sequence created. (Observations using # the oracle will use an agent's PRNG and thus not cause a problem.) # Turn variance into std. sigma_s = sqrt(sigma_s) # Create the time series into which values will be projected and initialize the first value. date_range =
pd.date_range(self.mkt_open, self.mkt_close, closed='left', freq='N')
pandas.date_range
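A minimal usage sketch for the pandas.date_range API labelled above (not part of the dataset row; the timestamps are placeholders). It assumes pandas >= 1.4, where the closed= keyword seen in the completion has been renamed inclusive=; "N" is the nanosecond frequency alias (spelled "ns" in newer releases).

import pandas as pd

# Build a left-closed, nanosecond-resolution index between two timestamps,
# mirroring the call pattern in the completion above.
mkt_open = pd.Timestamp("2021-06-01 09:30:00")
mkt_close = mkt_open + pd.Timedelta(microseconds=1)  # 1000 ns later

date_range = pd.date_range(mkt_open, mkt_close, inclusive="left", freq="N")
print(len(date_range))  # 1000 timestamps; the close endpoint is excluded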
import unittest

import numpy as np

from pandas.core.api import Series
import pandas.core.algorithms as algos
import pandas.util.testing as tm


class TestMatch(unittest.TestCase):

    def test_ints(self):
        values = np.array([0, 2, 1])
        to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])

        result = algos.match(to_match, values)
        expected = np.array([0, 2, 1, 1, 0, 2, -1, 0])
        self.assert_(np.array_equal(result, expected))

    def test_strings(self):
        values = ['foo', 'bar', 'baz']
        to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']

        result = algos.match(to_match, values)
        expected = np.array([1, 0, -1, 0, 1, 2, -1])
        self.assert_(np.array_equal(result, expected))


class TestUnique(unittest.TestCase):

    def test_ints(self):
        arr = np.random.randint(0, 100, size=50)

        result = algos.unique(arr)
        self.assert_(isinstance(result, np.ndarray))

    def test_objects(self):
        arr = np.random.randint(0, 100, size=50).astype('O')

        result = algos.unique(arr)
        self.assert_(isinstance(result, np.ndarray))


def test_quantile():
    s = Series(np.random.randn(100))

    result = algos.quantile(s, [0, .25, .5, .75, 1.])
    expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
pandas.util.testing.assert_almost_equal
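A hedged note on the pandas.util.testing.assert_almost_equal API labelled above: that module was deprecated and later removed, so a minimal modern equivalent (assuming a recent pandas and NumPy) routes approximate checks through numpy.testing and exact pandas checks through pandas.testing.

import numpy as np
import pandas as pd

# Approximate and exact comparisons without the removed pandas.util.testing.
result = pd.Series([1.0, 2.0, 3.0]).quantile([0, 0.5, 1.0]).to_numpy()
expected = np.array([1.0, 2.0, 3.0])

np.testing.assert_allclose(result, expected)              # tolerant check
pd.testing.assert_series_equal(pd.Series(result),         # exact check
                               pd.Series(expected))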
import vectorbt as vbt import numpy as np import pandas as pd from numba import njit from datetime import datetime import pytest from vectorbt.generic import nb as generic_nb from vectorbt.generic.enums import range_dt from tests.utils import record_arrays_close seed = 42 day_dt = np.timedelta64(86400000000000) mask = pd.DataFrame([ [True, False, False], [False, True, False], [False, False, True], [True, False, False], [False, True, False] ], index=pd.Index([ datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3), datetime(2020, 1, 4), datetime(2020, 1, 5) ]), columns=['a', 'b', 'c']) ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index) price = pd.DataFrame({ 'open': [10, 11, 12, 11, 10], 'high': [11, 12, 13, 12, 11], 'low': [9, 10, 11, 10, 9], 'close': [11, 12, 11, 10, 9] }) group_by = pd.Index(['g1', 'g1', 'g2']) # ############# Global ############# # def setup_module(): vbt.settings.numba['check_func_suffix'] = True vbt.settings.caching.enabled = False vbt.settings.caching.whitelist = [] vbt.settings.caching.blacklist = [] def teardown_module(): vbt.settings.reset() # ############# accessors.py ############# # class TestAccessors: def test_indexing(self): assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total() def test_freq(self): assert mask.vbt.signals.wrapper.freq == day_dt assert mask['a'].vbt.signals.wrapper.freq == day_dt assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2 assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2 assert pd.Series([False, True]).vbt.signals.wrapper.freq is None assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3 assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4 @pytest.mark.parametrize( "test_n", [1, 2, 3, 4, 5], ) def test_fshift(self, test_n): pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False)) np.testing.assert_array_equal( mask['a'].vbt.signals.fshift(test_n).values, generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False) ) pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False)) @pytest.mark.parametrize( "test_n", [1, 2, 3, 4, 5], ) def test_bshift(self, test_n): pd.testing.assert_series_equal( mask['a'].vbt.signals.bshift(test_n), mask['a'].shift(-test_n, fill_value=False)) np.testing.assert_array_equal( mask['a'].vbt.signals.bshift(test_n).values, generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False) ) pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False)) def test_empty(self): pd.testing.assert_series_equal( pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'), pd.Series(np.full(5, False), index=np.arange(10, 15), name='a') ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']), pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c']) ) pd.testing.assert_series_equal( pd.Series.vbt.signals.empty_like(mask['a']), pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.empty_like(mask), pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns) ) def test_generate(self): @njit def choice_func_nb(from_i, to_i, col, n): if col == 0: return np.arange(from_i, to_i) elif col == 1: return np.full(1, from_i) else: return np.full(1, to_i 
- n) pd.testing.assert_series_equal( pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name), pd.Series( np.array([True, True, True, True, True]), index=mask['a'].index, name=mask['a'].name ) ) with pytest.raises(Exception): _ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate( (5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [True, True, False], [True, False, False], [True, False, False], [True, False, False], [True, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate( (5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [True, True, False], [False, False, False], [False, False, False], [False, False, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) def test_generate_both(self): @njit def entry_func_nb(from_i, to_i, col, temp_int): temp_int[0] = from_i return temp_int[:1] @njit def exit_func_nb(from_i, to_i, col, temp_int): temp_int[0] = from_i return temp_int[:1] temp_int = np.empty((mask.shape[0],), dtype=np.int_) en, ex = pd.Series.vbt.signals.generate_both( 5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,), index=mask['a'].index, name=mask['a'].name) pd.testing.assert_series_equal( en, pd.Series( np.array([True, False, True, False, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_series_equal( ex, pd.Series( np.array([False, True, False, True, False]), index=mask['a'].index, name=mask['a'].name ) ) en, ex = pd.DataFrame.vbt.signals.generate_both( (5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,), index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [False, False, False], [True, True, True], [False, False, False], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [True, True, True], [False, False, False], [True, True, True], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.Series.vbt.signals.generate_both( (5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,), index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0) pd.testing.assert_series_equal( en, pd.Series( np.array([True, True, True, True, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_series_equal( ex, pd.Series( np.array([True, True, True, True, True]), index=mask['a'].index, name=mask['a'].name ) ) en, ex = pd.Series.vbt.signals.generate_both( (5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,), index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1) pd.testing.assert_series_equal( en, pd.Series( np.array([True, True, True, True, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_series_equal( ex, pd.Series( np.array([False, True, True, True, True]), index=mask['a'].index, name=mask['a'].name ) ) @njit def entry_func2_nb(from_i, to_i, col, temp_int): temp_int[0] = from_i if from_i + 1 < to_i: temp_int[1] = from_i + 1 return temp_int[:2] return temp_int[:1] @njit def exit_func2_nb(from_i, to_i, col, temp_int): temp_int[0] = from_i if from_i + 1 < to_i: temp_int[1] = from_i + 1 return temp_int[:2] return temp_int[:1] en, ex = pd.DataFrame.vbt.signals.generate_both( (5, 3), entry_func2_nb, 
(temp_int,), exit_func2_nb, (temp_int,), entry_pick_first=False, exit_pick_first=False, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [True, True, True], [False, False, False], [False, False, False], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [False, False, False], [True, True, True], [True, True, True], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) def test_generate_exits(self): @njit def choice_func_nb(from_i, to_i, col, temp_int): temp_int[0] = from_i return temp_int[:1] temp_int = np.empty((mask.shape[0],), dtype=np.int_) pd.testing.assert_series_equal( mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int), pd.Series( np.array([False, True, False, False, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_exits(choice_func_nb, temp_int), pd.DataFrame( np.array([ [False, False, False], [True, False, False], [False, True, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0), pd.DataFrame( np.array([ [True, False, False], [False, True, False], [False, False, True], [True, False, False], [False, True, False] ]), index=mask.index, columns=mask.columns ) ) @njit def choice_func2_nb(from_i, to_i, col, temp_int): for i in range(from_i, to_i): temp_int[i - from_i] = i return temp_int[:to_i - from_i] pd.testing.assert_frame_equal( mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False), pd.DataFrame( np.array([ [False, False, False], [True, False, False], [True, True, False], [True, True, True], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) mask2 = pd.Series([True, True, True, True, True], index=mask.index) pd.testing.assert_series_equal( mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True), pd.Series( np.array([False, True, False, True, False]), index=mask.index ) ) def test_clean(self): entries = pd.DataFrame([ [True, False, True], [True, False, False], [True, True, True], [False, True, False], [False, True, True] ], index=mask.index, columns=mask.columns) exits = pd.Series([True, False, True, False, True], index=mask.index) pd.testing.assert_frame_equal( entries.vbt.signals.clean(), pd.DataFrame( np.array([ [True, False, True], [False, False, False], [False, True, True], [False, False, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.clean(entries), pd.DataFrame( np.array([ [True, False, True], [False, False, False], [False, True, True], [False, False, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( entries.vbt.signals.clean(exits)[0], pd.DataFrame( np.array([ [False, False, False], [True, False, False], [False, False, False], [False, True, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( entries.vbt.signals.clean(exits)[1], pd.DataFrame( np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, False], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( entries.vbt.signals.clean(exits, 
entry_first=False)[0], pd.DataFrame( np.array([ [False, False, False], [True, False, False], [False, False, False], [False, True, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( entries.vbt.signals.clean(exits, entry_first=False)[1], pd.DataFrame( np.array([ [False, True, False], [False, False, False], [False, False, False], [False, False, False], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.clean(entries, exits)[0], pd.DataFrame( np.array([ [False, False, False], [True, False, False], [False, False, False], [False, True, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.clean(entries, exits)[1], pd.DataFrame( np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, False], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) with pytest.raises(Exception): _ = pd.Series.vbt.signals.clean(entries, entries, entries) def test_generate_random(self): pd.testing.assert_series_equal( pd.Series.vbt.signals.generate_random( 5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name), pd.Series( np.array([False, True, True, False, True]), index=mask['a'].index, name=mask['a'].name ) ) with pytest.raises(Exception): _ = pd.Series.vbt.signals.generate_random((5, 2), n=3) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate_random( (5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [False, False, True], [True, True, True], [True, True, False], [False, True, True], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate_random( (5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [False, False, True], [False, False, True], [False, False, False], [False, True, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_series_equal( pd.Series.vbt.signals.generate_random( 5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name), pd.Series( np.array([True, False, False, False, True]), index=mask['a'].index, name=mask['a'].name ) ) with pytest.raises(Exception): _ = pd.Series.vbt.signals.generate_random((5, 2), prob=3) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate_random( (5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [True, True, True], [False, True, False], [False, False, False], [False, False, True], [True, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate_random( (5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [False, True, True], [False, True, True], [False, False, True], [False, False, True], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) with pytest.raises(Exception): pd.DataFrame.vbt.signals.generate_random((5, 3)) pd.testing.assert_frame_equal( pd.DataFrame.vbt.signals.generate_random( (5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns), pd.DataFrame( np.array([ [False, True, True], [False, False, False], [False, False, False], [False, False, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) def 
test_generate_random_both(self): # n en, ex = pd.Series.vbt.signals.generate_random_both( 5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name) pd.testing.assert_series_equal( en, pd.Series( np.array([True, False, True, False, False]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_series_equal( ex, pd.Series( np.array([False, True, False, False, True]), index=mask['a'].index, name=mask['a'].name ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [False, False, False], [True, True, False], [False, False, True], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [True, True, True], [False, False, False], [False, True, False], [True, False, True] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [False, False, True], [False, True, False], [False, False, False], [False, False, True], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [False, False, True], [False, False, False], [False, True, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [True, True, True], ]) ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [True, True, True], [True, True, True] ]) ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [True, True, True], [False, False, False] ]) ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [True, True, True], [True, True, True], ]) ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [False, False, False], [False, False, False], [False, False, False], [True, True, True], [False, False, False], [False, False, False] ]) ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [False, False, False], [True, True, True], [False, False, False], [False, False, False], [False, False, False], [True, True, True] ]) ) ) n = 10 a = np.full(n * 2, 0.) 
for i in range(10000): en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2) _a = np.empty((n * 2,), dtype=np.int_) _a[0::2] = np.flatnonzero(en) _a[1::2] = np.flatnonzero(ex) a += _a greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n) less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2) assert np.all(greater & less) # probs en, ex = pd.Series.vbt.signals.generate_random_both( 5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name) pd.testing.assert_series_equal( en, pd.Series( np.array([True, False, False, False, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_series_equal( ex, pd.Series( np.array([False, True, False, False, False]), index=mask['a'].index, name=mask['a'].name ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [False, False, False], [False, False, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [True, True, True], [False, False, False], [False, False, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [False, True, True], [False, False, False], [False, False, True], [False, False, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [False, True, True], [False, False, False], [False, False, True], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), entry_prob=1., exit_prob=1., exit_wait=0, seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [True, True, True], [True, True, True], [True, True, True], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [True, True, True], [True, True, True], [True, True, True], [True, True, True], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True, seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [True, True, True], [True, True, True], [True, True, True], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) en, ex = pd.DataFrame.vbt.signals.generate_random_both( (5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False, seed=seed, index=mask.index, columns=mask.columns) pd.testing.assert_frame_equal( en, pd.DataFrame( np.array([ [True, True, True], [False, False, False], [False, False, False], [False, False, False], [False, False, False] ]), index=mask.index, 
columns=mask.columns ) ) pd.testing.assert_frame_equal( ex, pd.DataFrame( np.array([ [False, False, False], [True, True, True], [True, True, True], [True, True, True], [True, True, True] ]), index=mask.index, columns=mask.columns ) ) # none with pytest.raises(Exception): pd.DataFrame.vbt.signals.generate_random((5, 3)) def test_generate_random_exits(self): pd.testing.assert_series_equal( mask['a'].vbt.signals.generate_random_exits(seed=seed), pd.Series( np.array([False, False, True, False, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_random_exits(seed=seed), pd.DataFrame( np.array([ [False, False, False], [False, False, False], [True, True, False], [False, False, False], [True, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_random_exits(seed=seed, wait=0), pd.DataFrame( np.array([ [True, False, False], [False, False, False], [False, True, False], [False, False, True], [True, True, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_series_equal( mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed), pd.Series( np.array([False, True, False, False, True]), index=mask['a'].index, name=mask['a'].name ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_random_exits(prob=1., seed=seed), pd.DataFrame( np.array([ [False, False, False], [True, False, False], [False, True, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed), pd.DataFrame( np.array([ [False, False, False], [False, False, False], [False, False, False], [False, True, True], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed), pd.DataFrame( np.array([ [True, False, False], [False, True, False], [False, False, True], [True, False, False], [False, True, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed), pd.DataFrame( np.array([ [False, False, False], [True, False, False], [False, True, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) def test_generate_stop_exits(self): e = pd.Series([True, False, False, False, False, False]) t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64) # stop loss pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(t, -0.1), pd.Series(np.array([False, False, False, False, False, True])) ) pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True), pd.Series(np.array([False, False, False, True, False, False])) ) pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False), pd.Series(np.array([False, False, False, True, True, True])) ) pd.testing.assert_frame_equal( e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False), pd.DataFrame(np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, False], [False, True, False], [False, True, False] ])) ) pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3), pd.Series(np.array([False, False, False, False, True, False])) ) # take profit 
pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(4 - t, 0.1), pd.Series(np.array([False, False, False, False, False, True])) ) pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True), pd.Series(np.array([False, False, False, True, False, False])) ) pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False), pd.Series(np.array([False, False, False, True, True, True])) ) pd.testing.assert_frame_equal( e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False), pd.DataFrame(np.array([ [False, False, False], [False, False, False], [False, False, False], [False, True, True], [False, True, True], [False, True, True] ])) ) pd.testing.assert_series_equal( e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3), pd.Series(np.array([False, False, False, False, True, False])) ) # chain e = pd.Series([True, True, True, True, True, True]) en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True) pd.testing.assert_series_equal( en, pd.Series(np.array([True, False, False, False, True, False])) ) pd.testing.assert_series_equal( ex, pd.Series(np.array([False, False, False, True, False, True])) ) en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True) pd.testing.assert_series_equal( en, pd.Series(np.array([True, False, False, False, False, True])) ) pd.testing.assert_series_equal( ex, pd.Series(np.array([False, False, False, True, False, False])) ) en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True) pd.testing.assert_series_equal( en, pd.Series(np.array([True, False, False, False, True, False])) ) pd.testing.assert_series_equal( ex, pd.Series(np.array([False, False, False, True, False, False])) ) # until_next and pick_first e2 = pd.Series([True, True, True, True, True, True]) t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64) ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False) pd.testing.assert_series_equal( ex, pd.Series(np.array([False, True, True, True, True, True])) ) def test_generate_ohlc_stop_exits(self): with pytest.raises(Exception): _ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1) with pytest.raises(Exception): _ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1) pd.testing.assert_frame_equal( mask.vbt.signals.generate_stop_exits(ts, -0.1), mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True), mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_stop_exits(ts, 0.1), mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_stop_exits(ts, 0.1), mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True), mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True) ) pd.testing.assert_frame_equal( mask.vbt.signals.generate_stop_exits(ts, -0.1), mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True) ) def _test_ohlc_stop_exits(**kwargs): out_dict = {'stop_price': np.nan, 'stop_type': -1} result = mask.vbt.signals.generate_ohlc_stop_exits( price['open'], price['high'], price['low'], 
price['close'], out_dict=out_dict, **kwargs ) if isinstance(result, tuple): _, ex = result else: ex = result return result, out_dict['stop_price'], out_dict['stop_type'] ex, stop_price, stop_type = _test_ohlc_stop_exits() pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, False], [False, False, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1] ]), index=mask.index, columns=mask.columns) ) ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, 10.8], [9.9, np.nan, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, 0], [0, -1, -1] ]), index=mask.index, columns=mask.columns) ) ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [False, False, False], [False, False, False], [False, True, True], [True, False, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, 11.7, 10.8], [9.9, np.nan, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [-1, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, 1, 1], [1, -1, -1] ]), index=mask.index, columns=mask.columns) ) ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [True, False, False], [False, True, False], [False, False, False], [False, False, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [11.0, np.nan, np.nan], [np.nan, 12.1, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [-1, -1, -1], [2, -1, -1], [-1, 2, -1], [-1, -1, -1], [-1, -1, -1] ]), index=mask.index, columns=mask.columns) ) ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [True, False, False], [False, True, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [11.0, np.nan, np.nan], [np.nan, 12.1, np.nan], [np.nan, np.nan, 10.8], [9.9, np.nan, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, 
pd.DataFrame(np.array([ [-1, -1, -1], [2, -1, -1], [-1, 2, -1], [-1, -1, 1], [1, -1, -1] ]), index=mask.index, columns=mask.columns) ) ex, stop_price, stop_type = _test_ohlc_stop_exits( sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2]) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [False, False, False], [False, True, False], [False, False, False], [False, False, True] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, 12.1, np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, 9.6] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [-1, -1, -1], [-1, -1, -1], [-1, 2, -1], [-1, -1, -1], [-1, -1, 1] ]), index=mask.index, columns=mask.columns) ) ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [True, False, False], [False, False, False], [False, True, False], [False, False, True], [True, True, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [9.0, np.nan, np.nan], [np.nan, np.nan, np.nan], [np.nan, 12.1, np.nan], [np.nan, np.nan, 11.7], [10.8, 9.0, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [1, -1, -1], [-1, -1, -1], [-1, 2, -1], [-1, -1, 1], [1, 1, -1] ]), index=mask.index, columns=mask.columns) ) (en, ex), stop_price, stop_type = _test_ohlc_stop_exits( sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True) pd.testing.assert_frame_equal( en, pd.DataFrame(np.array([ [True, False, False], [False, True, False], [False, False, True], [True, False, False], [False, True, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( ex, pd.DataFrame(np.array([ [False, False, False], [True, False, False], [False, True, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_price, pd.DataFrame(np.array([ [np.nan, np.nan, np.nan], [11.0, np.nan, np.nan], [np.nan, 12.1, np.nan], [np.nan, np.nan, 10.8], [9.9, np.nan, np.nan] ]), index=mask.index, columns=mask.columns) ) pd.testing.assert_frame_equal( stop_type, pd.DataFrame(np.array([ [-1, -1, -1], [2, -1, -1], [-1, 2, -1], [-1, -1, 1], [1, -1, -1] ]), index=mask.index, columns=mask.columns) ) def test_between_ranges(self): ranges = mask.vbt.signals.between_ranges() record_arrays_close( ranges.values, np.array([ (0, 0, 0, 3, 1), (1, 1, 1, 4, 1) ], dtype=range_dt) ) assert ranges.wrapper == mask.vbt.wrapper mask2 = pd.DataFrame([ [True, True, True], [True, True, True], [False, False, False], [False, False, False], [False, False, False] ], index=mask.index, columns=mask.columns) other_mask = pd.DataFrame([ [False, False, False], [True, False, False], [True, True, False], [False, True, True], [False, False, True] ], index=mask.index, columns=mask.columns) ranges = mask2.vbt.signals.between_ranges(other=other_mask) record_arrays_close( ranges.values, np.array([ (0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1), (3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1) ], dtype=range_dt) ) assert ranges.wrapper == mask2.vbt.wrapper ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True) record_arrays_close( ranges.values, np.array([ (0, 0, 1, 1, 1), (1, 0, 
1, 2, 1), (2, 1, 1, 2, 1), (3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1) ], dtype=range_dt) ) assert ranges.wrapper == mask2.vbt.wrapper def test_partition_ranges(self): mask2 = pd.DataFrame([ [False, False, False], [True, False, False], [True, True, False], [False, True, True], [True, False, True] ], index=mask.index, columns=mask.columns) ranges = mask2.vbt.signals.partition_ranges() record_arrays_close( ranges.values, np.array([ (0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0) ], dtype=range_dt) ) assert ranges.wrapper == mask2.vbt.wrapper def test_between_partition_ranges(self): mask2 = pd.DataFrame([ [True, False, False], [True, True, False], [False, True, True], [True, False, True], [False, True, False] ], index=mask.index, columns=mask.columns) ranges = mask2.vbt.signals.between_partition_ranges() record_arrays_close( ranges.values, np.array([ (0, 0, 1, 3, 1), (1, 1, 2, 4, 1) ], dtype=range_dt) ) assert ranges.wrapper == mask2.vbt.wrapper def test_pos_rank(self): pd.testing.assert_series_equal( (~mask['a']).vbt.signals.pos_rank(), pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.pos_rank(), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 1], [1, 0, -1], [-1, 1, 0], [0, -1, 1] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.pos_rank(after_false=True), pd.DataFrame( np.array([ [-1, -1, -1], [0, -1, -1], [1, 0, -1], [-1, 1, 0], [0, -1, 1] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.pos_rank(allow_gaps=True), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 1], [1, 1, -1], [-1, 2, 2], [2, -1, 3] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 1], [1, 1, -1], [-1, 0, 0], [0, -1, 1] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 1], [1, 0, -1], [-1, 1, 0], [0, -1, 1] ]), index=mask.index, columns=mask.columns ) ) def test_partition_pos_rank(self): pd.testing.assert_series_equal( (~mask['a']).vbt.signals.partition_pos_rank(), pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.partition_pos_rank(), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 0], [0, 1, -1], [-1, 1, 1], [1, -1, 1] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.partition_pos_rank(after_false=True), pd.DataFrame( np.array([ [-1, -1, -1], [0, -1, -1], [0, 0, -1], [-1, 0, 0], [1, -1, 0] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 0], [0, 1, -1], [-1, 0, 0], [0, -1, 0] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.partition_pos_rank(reset_by=mask), pd.DataFrame( np.array([ [-1, 0, 0], [0, -1, 0], [0, 0, -1], [-1, 0, 0], [0, -1, 0] ]), index=mask.index, columns=mask.columns ) ) def test_pos_rank_fns(self): pd.testing.assert_frame_equal( (~mask).vbt.signals.first(), pd.DataFrame( np.array([ [False, True, True], [True, False, False], [False, True, False], [False, False, True], [True, False, False] ]), index=mask.index, columns=mask.columns ) ) 
pd.testing.assert_frame_equal( (~mask).vbt.signals.nth(1), pd.DataFrame( np.array([ [False, False, False], [False, False, True], [True, False, False], [False, True, False], [False, False, True] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.nth(2), pd.DataFrame( np.array([ [False, False, False], [False, False, False], [False, False, False], [False, False, False], [False, False, False] ]), index=mask.index, columns=mask.columns ) ) pd.testing.assert_frame_equal( (~mask).vbt.signals.from_nth(0), pd.DataFrame( np.array([ [False, True, True], [True, False, True], [True, True, False], [False, True, True], [True, False, True] ]), index=mask.index, columns=mask.columns ) ) def test_pos_rank_mapped(self): mask2 = pd.DataFrame([ [True, False, False], [True, True, False], [False, True, True], [True, False, True], [False, True, False] ], index=mask.index, columns=mask.columns) mapped = mask2.vbt.signals.pos_rank_mapped() np.testing.assert_array_equal( mapped.values, np.array([0, 1, 0, 0, 1, 0, 0, 1]) ) np.testing.assert_array_equal( mapped.col_arr, np.array([0, 0, 0, 1, 1, 1, 2, 2]) ) np.testing.assert_array_equal( mapped.idx_arr, np.array([0, 1, 3, 1, 2, 4, 2, 3]) ) assert mapped.wrapper == mask2.vbt.wrapper def test_partition_pos_rank_mapped(self): mask2 = pd.DataFrame([ [True, False, False], [True, True, False], [False, True, True], [True, False, True], [False, True, False] ], index=mask.index, columns=mask.columns) mapped = mask2.vbt.signals.partition_pos_rank_mapped() np.testing.assert_array_equal( mapped.values, np.array([0, 0, 1, 0, 0, 1, 0, 0]) ) np.testing.assert_array_equal( mapped.col_arr, np.array([0, 0, 0, 1, 1, 1, 2, 2]) ) np.testing.assert_array_equal( mapped.idx_arr, np.array([0, 1, 3, 1, 2, 4, 2, 3]) ) assert mapped.wrapper == mask2.vbt.wrapper def test_nth_index(self): assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00') pd.testing.assert_series_equal( mask.vbt.signals.nth_index(0), pd.Series([ pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-02 00:00:00'), pd.Timestamp('2020-01-03 00:00:00') ], index=mask.columns, name='nth_index', dtype='datetime64[ns]') ) pd.testing.assert_series_equal( mask.vbt.signals.nth_index(-1), pd.Series([ pd.Timestamp('2020-01-04 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), pd.Timestamp('2020-01-03 00:00:00') ], index=mask.columns, name='nth_index', dtype='datetime64[ns]') ) pd.testing.assert_series_equal( mask.vbt.signals.nth_index(-2), pd.Series([ pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-02 00:00:00'), np.nan ], index=mask.columns, name='nth_index', dtype='datetime64[ns]') ) pd.testing.assert_series_equal( mask.vbt.signals.nth_index(0, group_by=group_by), pd.Series([ pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-03 00:00:00') ], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]') ) pd.testing.assert_series_equal( mask.vbt.signals.nth_index(-1, group_by=group_by), pd.Series([ pd.Timestamp('2020-01-05 00:00:00'), pd.Timestamp('2020-01-03 00:00:00') ], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]') ) def test_norm_avg_index(self): assert mask['a'].vbt.signals.norm_avg_index() == -0.25 pd.testing.assert_series_equal( mask.vbt.signals.norm_avg_index(), pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index') ) pd.testing.assert_series_equal( mask.vbt.signals.norm_avg_index(group_by=group_by), pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index') ) def test_index_mapped(self): mapped = 
mask.vbt.signals.index_mapped() np.testing.assert_array_equal( mapped.values, np.array([0, 3, 1, 4, 2]) ) np.testing.assert_array_equal( mapped.col_arr, np.array([0, 0, 1, 1, 2]) ) np.testing.assert_array_equal( mapped.idx_arr, np.array([0, 3, 1, 4, 2]) ) assert mapped.wrapper == mask.vbt.wrapper def test_total(self): assert mask['a'].vbt.signals.total() == 2 pd.testing.assert_series_equal( mask.vbt.signals.total(), pd.Series([2, 2, 1], index=mask.columns, name='total') ) pd.testing.assert_series_equal( mask.vbt.signals.total(group_by=group_by), pd.Series([4, 1], index=['g1', 'g2'], name='total') ) def test_rate(self): assert mask['a'].vbt.signals.rate() == 0.4 pd.testing.assert_series_equal( mask.vbt.signals.rate(), pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate') ) pd.testing.assert_series_equal( mask.vbt.signals.rate(group_by=group_by), pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate') ) def test_total_partitions(self): assert mask['a'].vbt.signals.total_partitions() == 2 pd.testing.assert_series_equal( mask.vbt.signals.total_partitions(), pd.Series([2, 2, 1], index=mask.columns, name='total_partitions') ) pd.testing.assert_series_equal( mask.vbt.signals.total_partitions(group_by=group_by), pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions') ) def test_partition_rate(self): assert mask['a'].vbt.signals.partition_rate() == 1.0 pd.testing.assert_series_equal( mask.vbt.signals.partition_rate(), pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate') ) pd.testing.assert_series_equal( mask.vbt.signals.partition_rate(group_by=group_by), pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate') ) def test_stats(self): stats_index = pd.Index([ 'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index', 'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min', 'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions', 'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max', 'Partition Length: Mean', 'Partition Length: Std', 'Partition Distance: Min', 'Partition Distance: Max', 'Partition Distance: Mean', 'Partition Distance: Std' ], dtype='object') pd.testing.assert_series_equal( mask.vbt.signals.stats(), pd.Series([ pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), pd.Timedelta('5 days 00:00:00'), 1.6666666666666667, 33.333333333333336, pd.Timestamp('2020-01-02 00:00:00'), pd.Timestamp('2020-01-04 00:00:00'), 0.0, pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), np.nan, 1.6666666666666667, 100.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), np.nan ], index=stats_index, name='agg_func_mean' ) ) pd.testing.assert_series_equal( mask.vbt.signals.stats(column='a'), pd.Series([ pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), pd.Timedelta('5 days 00:00:00'), 2, 40.0, pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-04 00:00:00'), -0.25, pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), np.nan, 2, 100.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), pd.Timedelta('3 days 00:00:00'), np.nan ], index=stats_index, name='a' ) ) 
pd.testing.assert_series_equal( mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False)), pd.Series([ pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00')
pandas.Timestamp
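A minimal sketch of the pandas.Timestamp API labelled above (values are placeholders, not taken from the test fixtures): timestamps can be built from ISO strings or keyword components and combine with pd.Timedelta for the date arithmetic the tests rely on.

import pandas as pd

t0 = pd.Timestamp("2020-01-01 00:00:00")      # from an ISO-format string
t1 = pd.Timestamp(year=2020, month=1, day=5)  # from components

assert t1 - t0 == pd.Timedelta("4 days")
assert t0 + pd.Timedelta(days=4) == t1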
import numpy as np import numpy.testing as npt import pandas as pd import pandas.testing as pdt import pytest def test_col(): from anaphora import Col # attribute access df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}) actual = Col('y').values(df) expected = df['y'].values assert isinstance(actual, np.ndarray) npt.assert_array_equal(actual, expected) # attribute chaining (!!) df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}) actual = Col('y').values(df).dtype expected = df['y'].values.dtype npt.assert_array_equal(actual, expected) # method chaining df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}) actual = Col('y').map({1: '1', 2: '2'}).astype('category')(df) expected = df['y'].map({1: '1', 2: '2'}).astype('category') pdt.assert_series_equal(actual, expected) # magic method chaining df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}) actual = ((Col('y') + 3) * 10)(df) expected = (df['y'] + 3) * 10 pdt.assert_series_equal(actual, expected) # loc, scalar output df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = Col('x').loc['c'](df) expected = 6 assert int(actual) == expected # loc, vector output df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = Col('x').loc[['a','c']](df) expected = pd.Series([4,6], index=['a','c'], name='x') pdt.assert_series_equal(actual, expected) def test_with_column(): from anaphora import Col, with_column # replace a column df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = with_column(df, 'x', Col() * 10) expected = df.copy() expected['x'] = df['x'] * 10 pdt.assert_frame_equal(actual, expected) # add a column df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = with_column(df, 'z', Col('x') * 10) expected = df.copy() expected['z'] = df['x'] * 10 pdt.assert_frame_equal(actual, expected) # subset with scalar loc df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = with_column(df, 'z', Col('y') * 10, loc='b') expected = df.copy() expected.loc['b', 'z'] = df.loc['b', 'y'] * 10 pdt.assert_frame_equal(actual, expected) # subset with scalar iloc df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = with_column(df, 'z', Col('y') * 10, iloc=1) expected = df.copy() expected.loc[expected.index[1], 'z'] = df['y'].iloc[1] * 10 pdt.assert_frame_equal(actual, expected) # subset with vector loc df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = with_column(df, 'z', Col('y') * 10, loc=['a', 'b']) expected = df.copy() expected.loc[['a', 'b'], 'z'] = df.loc[['a', 'b'], 'y'] * 10 pdt.assert_frame_equal(actual, expected) # subset with vector iloc df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) actual = with_column(df, 'z', Col('y') * 10, iloc=[1, 2]) expected = df.copy() expected.loc[expected.index[[1, 2]], 'z'] = df['y'].iloc[[1,2]] * 10 pdt.assert_frame_equal(actual, expected) # no-name shortcut df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) assert pd.testing.assert_frame_equal( with_column(df, 'y', Col('y') * 10, iloc=[1, 2]), with_column(df, 'y', Col() * 10, iloc=[1, 2]) ) with pytest.raises(KeyError) as exc_info: with_column(df, 'z', Col() * 10, iloc=[1, 2]) assert str(exc_info.value) == 'z' # don't mutate original df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc')) df2 = with_column(df, 'y', Col('y') * 10, iloc=[1, 2]), with pytest.raises(AssertionError): pd.testing.assert_frame_equal(df, df2) def test_mutate(): from anaphora import Col, mutate, mutate_sequential df =
pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
pandas.DataFrame
import numpy as np
import pandas as pd

array = [1,2,np.nan,4,5,np.nan,np.nan,1,2,2,np.nan,np.nan,4]
df_array =
pd.DataFrame(array)
pandas.DataFrame
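A short sketch of the pandas.DataFrame API labelled above, reusing the NaN-laden list from the prompt; the column name and the isna()/fillna() follow-ups are illustrative additions, not part of the row.

import numpy as np
import pandas as pd

array = [1, 2, np.nan, 4, 5, np.nan, np.nan, 1, 2, 2, np.nan, np.nan, 4]
df_array = pd.DataFrame(array, columns=["value"])

print(df_array["value"].isna().sum())     # 5 missing entries
print(df_array["value"].fillna(0).sum())  # 21.0 once NaNs are zero-filled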
import zmq
import threading
import os
import os.path
import pandas as pd

UPDATE_PUBLISHER = "tcp://viirscollector:19191"
PICKLE_DIR = "/viirs/pickle"
UPDATE_PICKLE = os.path.join(PICKLE_DIR, "task_queue.pickle")


class UpdateSubscriber(threading.Thread):
    def __init__(self, context):
        threading.Thread.__init__(self)
        self.socket = context.socket(zmq.SUB)
        self.socket.setsockopt(zmq.TCP_KEEPALIVE, 1)
        self.socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 60)
        self.socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, 20)
        self.socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 60)
        self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
        self.socket.connect(UPDATE_PUBLISHER)
        self.lock = threading.Lock()
        self.initialize()

    def initialize(self):
        if os.path.exists(UPDATE_PICKLE):
            print("loading {}".format(UPDATE_PICKLE))
            self._waiting_tasks =
pd.read_pickle(UPDATE_PICKLE)
pandas.read_pickle
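A round-trip sketch for the pandas.read_pickle API labelled above (the frame contents and file name are placeholders, not the real /viirs/pickle data): to_pickle() persists any pandas object and read_pickle() restores it unchanged.

import pandas as pd

tasks = pd.DataFrame({"granule": ["g1", "g2"], "status": ["queued", "done"]})
tasks.to_pickle("task_queue.pickle")         # write to the working directory

restored = pd.read_pickle("task_queue.pickle")
pd.testing.assert_frame_equal(tasks, restored)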
"""Container objects for results of CmdStan run(s).""" import copy import glob import logging import math import os import re import shutil from collections import Counter, OrderedDict from datetime import datetime from time import time from typing import Dict, List, Tuple, Union import numpy as np import pandas as pd try: import xarray as xr XARRAY_INSTALLED = True except ImportError: XARRAY_INSTALLED = False from cmdstanpy import _CMDSTAN_SAMPLING, _CMDSTAN_THIN, _CMDSTAN_WARMUP, _TMPDIR from cmdstanpy.cmdstan_args import ( CmdStanArgs, Method, OptimizeArgs, SamplerArgs, VariationalArgs, ) from cmdstanpy.utils import ( EXTENSION, check_sampler_csv, cmdstan_path, cmdstan_version_at, create_named_text_file, do_command, get_logger, parse_method_vars, parse_stan_vars, scan_config, scan_generated_quantities_csv, scan_optimize_csv, scan_variational_csv, ) class RunSet: """ Encapsulates the configuration and results of a call to any CmdStan inference method. Records the method return code and locations of all console, error, and output files. """ def __init__( self, args: CmdStanArgs, chains: int = 4, chain_ids: List[int] = None, logger: logging.Logger = None, ) -> None: """Initialize object.""" self._args = args self._chains = chains self._logger = logger or get_logger() if chains < 1: raise ValueError( 'chains must be positive integer value, ' 'found {}'.format(chains) ) if chain_ids is None: chain_ids = [x + 1 for x in range(chains)] elif len(chain_ids) != chains: raise ValueError( 'mismatch between number of chains and chain_ids, ' 'found {} chains, but {} chain_ids'.format( chains, len(chain_ids) ) ) self._chain_ids = chain_ids self._retcodes = [-1 for _ in range(chains)] # stdout, stderr are written to text files # prefix: ``<model_name>-<YYYYMMDDHHMM>-<chain_id>`` # suffixes: ``-stdout.txt``, ``-stderr.txt`` now = datetime.now() now_str = now.strftime('%Y%m%d%H%M') file_basename = '-'.join([args.model_name, now_str]) if args.output_dir is not None: output_dir = args.output_dir else: output_dir = _TMPDIR self._csv_files = [None for _ in range(chains)] self._diagnostic_files = [None for _ in range(chains)] self._profile_files = [None for _ in range(chains)] self._stdout_files = [None for _ in range(chains)] self._stderr_files = [None for _ in range(chains)] self._cmds = [] for i in range(chains): if args.output_dir is None: csv_file = create_named_text_file( dir=output_dir, prefix='{}-{}-'.format(file_basename, str(chain_ids[i])), suffix='.csv', ) else: csv_file = os.path.join( output_dir, '{}-{}.{}'.format(file_basename, str(chain_ids[i]), 'csv'), ) self._csv_files[i] = csv_file stdout_file = ''.join( [os.path.splitext(csv_file)[0], '-stdout.txt'] ) self._stdout_files[i] = stdout_file stderr_file = ''.join( [os.path.splitext(csv_file)[0], '-stderr.txt'] ) self._stderr_files[i] = stderr_file # optional output files: diagnostics, profiling if args.save_diagnostics: if args.output_dir is None: diag_file = create_named_text_file( dir=_TMPDIR, prefix='{}-diagnostic-{}-'.format( file_basename, str(chain_ids[i]) ), suffix='.csv', ) else: diag_file = os.path.join( output_dir, '{}-diagnostic-{}.{}'.format( file_basename, str(chain_ids[i]), 'csv' ), ) self._diagnostic_files[i] = diag_file if args.save_profile: if args.output_dir is None: profile_file = create_named_text_file( dir=_TMPDIR, prefix='{}-profile-{}-'.format( file_basename, str(chain_ids[i]) ), suffix='.csv', ) else: profile_file = os.path.join( output_dir, '{}-profile-{}.{}'.format( file_basename, str(chain_ids[i]), 'csv' ), ) 
self._profile_files[i] = profile_file if args.save_diagnostics and args.save_profile: self._cmds.append( args.compose_command( i, self._csv_files[i], diagnostic_file=self._diagnostic_files[i], profile_file=self._profile_files[i], ) ) elif args.save_diagnostics: self._cmds.append( args.compose_command( i, self._csv_files[i], diagnostic_file=self._diagnostic_files[i], ) ) elif args.save_profile: self._cmds.append( args.compose_command( i, self._csv_files[i], profile_file=self._profile_files[i], ) ) else: self._cmds.append(args.compose_command(i, self._csv_files[i])) def __repr__(self) -> str: repr = 'RunSet: chains={}'.format(self._chains) repr = '{}\n cmd:\n\t{}'.format(repr, self._cmds[0]) repr = '{}\n retcodes={}'.format(repr, self._retcodes) if os.path.exists(self._csv_files[0]): repr = '{}\n csv_files:\n\t{}'.format( repr, '\n\t'.join(self._csv_files) ) if self._args.save_diagnostics and os.path.exists( self._diagnostic_files[0] ): repr = '{}\n diagnostics_files:\n\t{}'.format( repr, '\n\t'.join(self._diagnostic_files) ) if self._args.save_profile and os.path.exists(self._profile_files[0]): repr = '{}\n profile_files:\n\t{}'.format( repr, '\n\t'.join(self._profile_files) ) if os.path.exists(self._stdout_files[0]): repr = '{}\n console_msgs:\n\t{}'.format( repr, '\n\t'.join(self._stdout_files) ) if os.path.exists(self._stderr_files[0]): repr = '{}\n error_msgs:\n\t{}'.format( repr, '\n\t'.join(self._stderr_files) ) return repr @property def model(self) -> str: """Stan model name.""" return self._args.model_name @property def method(self) -> Method: """CmdStan method used to generate this fit.""" return self._args.method @property def chains(self) -> int: """Number of chains.""" return self._chains @property def chain_ids(self) -> List[int]: """Chain ids.""" return self._chain_ids @property def cmds(self) -> List[str]: """List of call(s) to CmdStan, one call per-chain.""" return self._cmds @property def csv_files(self) -> List[str]: """List of paths to CmdStan output files.""" return self._csv_files @property def stdout_files(self) -> List[str]: """List of paths to CmdStan stdout transcripts.""" return self._stdout_files @property def stderr_files(self) -> List[str]: """List of paths to CmdStan stderr transcripts.""" return self._stderr_files def _check_retcodes(self) -> bool: """Returns ``True`` when all chains have retcode 0.""" for i in range(self._chains): if self._retcodes[i] != 0: return False return True @property def diagnostic_files(self) -> List[str]: """List of paths to CmdStan hamiltonian diagnostic files.""" return self._diagnostic_files @property def profile_files(self) -> List[str]: """List of paths to CmdStan profiler files.""" return self._profile_files def _retcode(self, idx: int) -> int: """Get retcode for chain[idx].""" return self._retcodes[idx] def _set_retcode(self, idx: int, val: int) -> None: """Set retcode for chain[idx] to val.""" self._retcodes[idx] = val def get_err_msgs(self) -> List[str]: """Checks console messages for each chain.""" msgs = [] for i in range(self._chains): if ( os.path.exists(self._stderr_files[i]) and os.stat(self._stderr_files[i]).st_size > 0 ): with open(self._stderr_files[i], 'r') as fd: msgs.append( 'chain_id {}:\n{}\n'.format( self._chain_ids[i], fd.read() ) ) # pre 2.27, all msgs sent to stdout, including errors if ( not cmdstan_version_at(2, 27) and os.path.exists(self._stdout_files[i]) and os.stat(self._stdout_files[i]).st_size > 0 ): with open(self._stdout_files[i], 'r') as fd: contents = fd.read() # pattern matches initial 
"Exception" or "Error" msg pat = re.compile(r'^E[rx].*$', re.M) errors = re.findall(pat, contents) if len(errors) > 0: msgs.append( 'chain_id {}:\n\t{}\n'.format( self._chain_ids[i], '\n\t'.join(errors) ) ) return '\n'.join(msgs) def save_csvfiles(self, dir: str = None) -> None: """ Moves csvfiles to specified directory. :param dir: directory path """ if dir is None: dir = os.path.realpath('.') test_path = os.path.join(dir, str(time())) try: os.makedirs(dir, exist_ok=True) with open(test_path, 'w'): pass os.remove(test_path) # cleanup except (IOError, OSError, PermissionError) as exc: raise Exception('cannot save to path: {}'.format(dir)) from exc for i in range(self.chains): if not os.path.exists(self._csv_files[i]): raise ValueError( 'cannot access csv file {}'.format(self._csv_files[i]) ) path, filename = os.path.split(self._csv_files[i]) if path == _TMPDIR: # cleanup tmpstr in filename root, ext = os.path.splitext(filename) rlist = root.split('-') root = '-'.join(rlist[:-1]) filename = ''.join([root, ext]) to_path = os.path.join(dir, filename) if os.path.exists(to_path): raise ValueError( 'file exists, not overwriting: {}'.format(to_path) ) try: self._logger.debug( 'saving tmpfile: "%s" as: "%s"', self._csv_files[i], to_path ) shutil.move(self._csv_files[i], to_path) self._csv_files[i] = to_path except (IOError, OSError, PermissionError) as e: raise ValueError( 'cannot save to file: {}'.format(to_path) ) from e class InferenceMetadata: """ CmdStan configuration and contents of output file parsed out of the Stan CSV file header comments and column headers. Assumes valid CSV files. """ def __init__(self, config: Dict) -> None: """Initialize object from CSV headers""" self._cmdstan_config = config self._method_vars_cols = parse_method_vars(names=config['column_names']) stan_vars_dims, stan_vars_cols = parse_stan_vars( names=config['column_names'] ) self._stan_vars_dims = stan_vars_dims self._stan_vars_cols = stan_vars_cols def __repr__(self) -> str: return 'Metadata:\n{}\n'.format(self._cmdstan_config) @property def cmdstan_config(self) -> Dict: return copy.deepcopy(self._cmdstan_config) @property def method_vars_cols(self) -> Dict[str, Tuple[int, ...]]: """ Returns a map from a Stan inference method variable to a tuple of column indices in inference engine's output array. Sampler variable names always end in `__`, e.g. `lp__`. Uses deepcopy for immutability. """ return copy.deepcopy(self._method_vars_cols) @property def stan_vars_cols(self) -> Dict[str, Tuple[int, ...]]: """ Returns a map from a Stan program variable name to a tuple of the column indices in the vector or matrix of estimates produced by a CmdStan inference method. Uses deepcopy for immutability. """ return copy.deepcopy(self._stan_vars_cols) @property def stan_vars_dims(self) -> Dict[str, Tuple[int, ...]]: """ Returns map from Stan program variable names to variable dimensions. Scalar types are mapped to the empty tuple, e.g., program variable ``int foo`` has dimesion ``()`` and program variable ``vector[10] bar`` has single dimension ``(10)``. Uses deepcopy for immutability. """ return copy.deepcopy(self._stan_vars_dims) class CmdStanMCMC: """ Container for outputs from CmdStan sampler run. Provides methods to summarize and diagnose the model fit and accessor methods to access the entire sample or individual items. The sample is lazily instantiated on first access of either the resulting sample or the HMC tuning parameters, i.e., the step size and metric. 
    The sample can be viewed either as a 2D array of draws from all chains
    by sampler and model variables, or as a 3D array of draws by chains by
    variables.
    """

    # pylint: disable=too-many-public-methods
    def __init__(
        self,
        runset: RunSet,
        logger: logging.Logger = None,
    ) -> None:
        """Initialize object."""
        if not runset.method == Method.SAMPLE:
            raise ValueError(
                'Wrong runset method, expecting sample runset, '
                'found method {}'.format(runset.method)
            )
        self.runset = runset
        self._logger = logger or get_logger()
        # info from runset to be exposed
        self._iter_sampling = runset._args.method_args.iter_sampling
        if self._iter_sampling is None:
            self._iter_sampling = _CMDSTAN_SAMPLING
        self._iter_warmup = runset._args.method_args.iter_warmup
        if self._iter_warmup is None:
            self._iter_warmup = _CMDSTAN_WARMUP
        self._thin = runset._args.method_args.thin
        if self._thin is None:
            self._thin = _CMDSTAN_THIN
        self._is_fixed_param = runset._args.method_args.fixed_param
        self._save_warmup = runset._args.method_args.save_warmup
        self._sig_figs = runset._args.sig_figs
        # info from CSV values, instantiated lazily
        self._metric = None
        self._step_size = None
        self._draws = None
        self._draws_pd = None
        # info from CSV initial comments and header
        config = self._validate_csv_files()
        self._metadata = InferenceMetadata(config)

    def __repr__(self) -> str:
        repr = 'CmdStanMCMC: model={} chains={}{}'.format(
            self.runset.model,
            self.runset.chains,
            self.runset._args.method_args.compose(0, cmd=[]),
        )
        repr = '{}\n csv_files:\n\t{}\n output_files:\n\t{}'.format(
            repr,
            '\n\t'.join(self.runset.csv_files),
            '\n\t'.join(self.runset.stdout_files),
        )
        # TODO - hamiltonian, profiling files
        return repr

    @property
    def chains(self) -> int:
        """Number of chains."""
        return self.runset.chains

    @property
    def chain_ids(self) -> List[int]:
        """Chain ids."""
        return self.runset.chain_ids

    @property
    def num_draws_warmup(self) -> int:
        """Number of warmup draws per chain, i.e., thinned warmup iterations."""
        return int(math.ceil((self._iter_warmup) / self._thin))

    @property
    def num_draws_sampling(self) -> int:
        """
        Number of sampling (post-warmup) draws per chain, i.e.,
        thinned sampling iterations.
        """
        return int(math.ceil((self._iter_sampling) / self._thin))

    @property
    def metadata(self) -> InferenceMetadata:
        """
        Returns object which contains CmdStan configuration as well as
        information about the names and structure of the inference method
        and model output variables.
        """
        return self._metadata

    @property
    def sampler_vars_cols(self) -> Dict:
        """
        Deprecated - use "metadata.method_vars_cols" instead
        """
        self._logger.warning(
            'property "sampler_vars_cols" has been deprecated, '
            'use "metadata.method_vars_cols" instead.'
        )
        return self.metadata.method_vars_cols

    @property
    def stan_vars_cols(self) -> Dict:
        """
        Deprecated - use "metadata.stan_vars_cols" instead
        """
        self._logger.warning(
            'property "stan_vars_cols" has been deprecated, '
            'use "metadata.stan_vars_cols" instead.'
        )
        return self.metadata.stan_vars_cols

    @property
    def stan_vars_dims(self) -> Dict:
        """
        Deprecated - use "metadata.stan_vars_dims" instead
        """
        self._logger.warning(
            'property "stan_vars_dims" has been deprecated, '
            'use "metadata.stan_vars_dims" instead.'
        )
        return self.metadata.stan_vars_dims

    @property
    def column_names(self) -> Tuple[str, ...]:
        """
        Names of all outputs from the sampler, comprising sampler parameters
        and all components of all model parameters, transformed parameters,
        and quantities of interest. Corresponds to Stan CSV file header row,
        with names munged to array notation, e.g. `beta[1]` not `beta.1`.
""" return self._metadata.cmdstan_config['column_names'] @property def num_unconstrained_params(self) -> int: """ Count of _unconstrained_ model parameters. This is the metric size; for metric `diag_e`, the length of the diagonal vector, for metric `dense_e` this is the size of the full covariance matrix. If the parameter variables in a model are constrained parameter types, the number of constrained and unconstrained parameters may differ. The sampler reports the constrained parameters and computes with the unconstrained parameters. E.g. a model with 2 parameter variables, ``real alpha`` and ``vector[3] beta`` has 4 constrained and 4 unconstrained parameters, however a model with variables ``real alpha`` and ``simplex[3] beta`` has 4 constrained and 3 unconstrained parameters. """ return self._metadata.cmdstan_config['num_unconstrained_params'] @property def metric_type(self) -> str: """ Metric type used for adaptation, either 'diag_e' or 'dense_e'. When sampler algorithm 'fixed_param' is specified, metric_type is None. """ if self._is_fixed_param: return None return self._metadata.cmdstan_config['metric'] # cmdstan arg name @property def metric(self) -> np.ndarray: """ Metric used by sampler for each chain. When sampler algorithm 'fixed_param' is specified, metric is None. """ if self._is_fixed_param: return None if self._metric is None: self._assemble_draws() return self._metric @property def step_size(self) -> np.ndarray: """ Step size used by sampler for each chain. When sampler algorithm 'fixed_param' is specified, step size is None. """ if self._is_fixed_param: return None if self._step_size is None: self._assemble_draws() return self._step_size @property def thin(self) -> int: """ Period between recorded iterations. (Default is 1). """ return self._thin def draws( self, *, inc_warmup: bool = False, concat_chains: bool = False ) -> np.ndarray: """ Returns a numpy.ndarray over all draws from all chains which is stored column major so that the values for a parameter are contiguous in memory, likewise all draws from a chain are contiguous. By default, returns a 3D array arranged (draws, chains, columns); parameter ``concat_chains=True`` will return a 2D array where all chains are flattened into a single column, although underlyingly, given M chains of N draws, the first N draws are from chain 1, up through the last N draws from chain M. :param inc_warmup: When ``True`` and the warmup draws are present in the output, i.e., the sampler was run with ``save_warmup=True``, then the warmup draws are included. Default value is ``False``. :param concat_chains: When ``True`` return a 2D array flattening all all draws from all chains. Default value is ``False``. """ if self._draws is None: self._assemble_draws() if inc_warmup and not self._save_warmup: self._logger.warning( 'draws from warmup iterations not available,' ' must run sampler with "save_warmup=True".' ) num_rows = self._draws.shape[0] start_idx = 0 if not inc_warmup and self._save_warmup: start_idx = self.num_draws_warmup num_rows -= start_idx if concat_chains: num_rows *= self.chains return self._draws[start_idx:, :, :].reshape( (num_rows, len(self.column_names)), order='F' ) return self._draws[start_idx:, :, :] @property def sample(self) -> np.ndarray: """ Deprecated - use method "draws()" instead. """ self._logger.warning( 'method "sample" has been deprecated, use method "draws" instead.' 
) return self.draws() @property def warmup(self) -> np.ndarray: """ Deprecated - use "draws(inc_warmup=True)" """ self._logger.warning( 'method "warmup" has been deprecated, instead use method' ' "draws(inc_warmup=True)", returning draws from both' ' warmup and sampling iterations.' ) return self.draws(inc_warmup=True) def _validate_csv_files(self) -> dict: """ Checks that Stan CSV output files for all chains are consistent and returns dict containing config and column names. Raises exception when inconsistencies detected. """ dzero = {} for i in range(self.chains): if i == 0: dzero = check_sampler_csv( path=self.runset.csv_files[i], is_fixed_param=self._is_fixed_param, iter_sampling=self._iter_sampling, iter_warmup=self._iter_warmup, save_warmup=self._save_warmup, thin=self._thin, ) else: drest = check_sampler_csv( path=self.runset.csv_files[i], is_fixed_param=self._is_fixed_param, iter_sampling=self._iter_sampling, iter_warmup=self._iter_warmup, save_warmup=self._save_warmup, thin=self._thin, ) for key in dzero: if ( key not in [ 'id', 'diagnostic_file', 'metric_file', 'profile_file', 'stepsize', 'init', 'seed', 'start_datetime', ] and dzero[key] != drest[key] ): raise ValueError( 'CmdStan config mismatch in Stan CSV file {}: ' 'arg {} is {}, expected {}'.format( self.runset.csv_files[i], key, dzero[key], drest[key], ) ) return dzero def _assemble_draws(self) -> None: """ Allocates and populates the step size, metric, and sample arrays by parsing the validated stan_csv files. """ if self._draws is not None: return num_draws = self.num_draws_sampling sampling_iter_start = 0 if self._save_warmup: num_draws += self.num_draws_warmup sampling_iter_start = self.num_draws_warmup self._draws = np.empty( (num_draws, self.chains, len(self.column_names)), dtype=float, order='F', ) if not self._is_fixed_param: self._step_size = np.empty(self.chains, dtype=float) if self.metric_type == 'diag_e': self._metric = np.empty( (self.chains, self.num_unconstrained_params), dtype=float ) else: self._metric = np.empty( ( self.chains, self.num_unconstrained_params, self.num_unconstrained_params, ), dtype=float, ) for chain in range(self.chains): with open(self.runset.csv_files[chain], 'r') as fd: # skip initial comments, up to columns header line = fd.readline().strip() while len(line) > 0 and line.startswith('#'): line = fd.readline().strip() # at columns header if not self._is_fixed_param: if self._save_warmup: for i in range(self.num_draws_warmup): line = fd.readline().strip() xs = line.split(',') self._draws[i, chain, :] = [float(x) for x in xs] # read to adaptation msg line = fd.readline().strip() if line != '# Adaptation terminated': while line != '# Adaptation terminated': line = fd.readline().strip() line = fd.readline().strip() # step_size _, step_size = line.split('=') self._step_size[chain] = float(step_size.strip()) line = fd.readline().strip() # metric header # process metric if self.metric_type == 'diag_e': line = fd.readline().lstrip(' #\t').strip() xs = line.split(',') self._metric[chain, :] = [float(x) for x in xs] else: for i in range(self.num_unconstrained_params): line = fd.readline().lstrip(' #\t').strip() xs = line.split(',') self._metric[chain, i, :] = [float(x) for x in xs] # process draws for i in range(sampling_iter_start, num_draws): line = fd.readline().strip() xs = line.split(',') self._draws[i, chain, :] = [float(x) for x in xs] def summary( self, percentiles: List[int] = None, sig_figs: int = None ) -> pd.DataFrame: """ Run cmdstan/bin/stansummary over all output csv files, 
assemble summary into DataFrame object; first row contains summary statistics for total joint log probability `lp__`, remaining rows contain summary statistics for all parameters, transformed parameters, and generated quantities variables listed in the order in which they were declared in the Stan program. :param percentiles: Ordered non-empty list of percentiles to report. Must be integers from (1, 99), inclusive. :param sig_figs: Number of significant figures to report. Must be an integer between 1 and 18. If unspecified, the default precision for the system file I/O is used; the usual value is 6. If precision above 6 is requested, sample must have been produced by CmdStan version 2.25 or later and sampler output precision must equal to or greater than the requested summary precision. :return: pandas.DataFrame """ percentiles_str = '--percentiles=5,50,95' if percentiles is not None: if len(percentiles) == 0: raise ValueError( 'invalid percentiles argument, must be ordered' ' non-empty list from (1, 99), inclusive.' ) cur_pct = 0 for pct in percentiles: if pct > 99 or not pct > cur_pct: raise ValueError( 'invalid percentiles spec, must be ordered' ' non-empty list from (1, 99), inclusive.' ) cur_pct = pct percentiles_str = '='.join( ['--percentiles', ','.join([str(x) for x in percentiles])] ) sig_figs_str = '--sig_figs=2' if sig_figs is not None: if not isinstance(sig_figs, int) or sig_figs < 1 or sig_figs > 18: raise ValueError( 'sig_figs must be an integer between 1 and 18,' ' found {}'.format(sig_figs) ) csv_sig_figs = self._sig_figs or 6 if sig_figs > csv_sig_figs: self._logger.warning( 'Requesting %d significant digits of output, but CSV files' ' only have %d digits of precision.', sig_figs, csv_sig_figs, ) sig_figs_str = '--sig_figs=' + str(sig_figs) cmd_path = os.path.join( cmdstan_path(), 'bin', 'stansummary' + EXTENSION ) tmp_csv_file = 'stansummary-{}-'.format(self.runset._args.model_name) tmp_csv_path = create_named_text_file( dir=_TMPDIR, prefix=tmp_csv_file, suffix='.csv', name_only=True ) csv_str = '--csv_filename={}'.format(tmp_csv_path) if not cmdstan_version_at(2, 24): csv_str = '--csv_file={}'.format(tmp_csv_path) cmd = [ cmd_path, percentiles_str, sig_figs_str, csv_str, ] + self.runset.csv_files do_command(cmd, logger=self.runset._logger) with open(tmp_csv_path, 'rb') as fd: summary_data = pd.read_csv( fd, delimiter=',', header=0, index_col=0, comment='#', float_precision='high', ) mask = [x == 'lp__' or not x.endswith('__') for x in summary_data.index] return summary_data[mask] def diagnose(self) -> str: """ Run cmdstan/bin/diagnose over all output csv files. Returns output of diagnose (stdout/stderr). The diagnose utility reads the outputs of all chains and checks for the following potential problems: + Transitions that hit the maximum treedepth + Divergent transitions + Low E-BFMI values (sampler transitions HMC potential energy) + Low effective sample sizes + High R-hat values """ cmd_path = os.path.join(cmdstan_path(), 'bin', 'diagnose' + EXTENSION) cmd = [cmd_path] + self.runset.csv_files result = do_command(cmd=cmd, logger=self.runset._logger) if result: self.runset._logger.info(result) return result def draws_pd( self, params: List[str] = None, inc_warmup: bool = False ) -> pd.DataFrame: """ Returns the sampler draws as a pandas DataFrame. Flattens all chains into single column. :param params: optional list of variable names. 
:param inc_warmup: When ``True`` and the warmup draws are present in the output, i.e., the sampler was run with ``save_warmup=True``, then the warmup draws are included. Default value is ``False``. """ if inc_warmup and not self._save_warmup: self._logger.warning( 'draws from warmup iterations not available,' ' must run sampler with "save_warmup=True".' ) self._assemble_draws() mask = [] if params is not None: for param in set(params): if ( param not in self.metadata.method_vars_cols and param not in self.metadata.stan_vars_cols ): raise ValueError('unknown parameter: {}'.format(param)) if param in self.metadata.method_vars_cols: mask.append(param) else: for idx in self.metadata.stan_vars_cols[param]: mask.append(self.column_names[idx]) num_draws = self.num_draws_sampling if inc_warmup and self._save_warmup: num_draws += self.num_draws_warmup num_rows = num_draws * self.chains if self._draws_pd is None or self._draws_pd.shape[0] != num_rows: # pylint: disable=redundant-keyword-arg data = self.draws(inc_warmup=inc_warmup).reshape( (num_rows, len(self.column_names)), order='F' ) self._draws_pd = pd.DataFrame(data=data, columns=self.column_names) if params is None: return self._draws_pd return self._draws_pd[mask] def draws_xr( self, vars: List[str] = None, inc_warmup: bool = False ) -> "xr.Dataset": """ Returns the sampler draws as a xarray Dataset. :param vars: optional list of variable names. :param inc_warmup: When ``True`` and the warmup draws are present in the output, i.e., the sampler was run with ``save_warmup=True``, then the warmup draws are included. Default value is ``False``. """ if not XARRAY_INSTALLED: raise RuntimeError( "xarray is not installed, cannot produce draws array" ) if inc_warmup and not self._save_warmup: self._logger.warning( "draws from warmup iterations not available," ' must run sampler with "save_warmup=True".' ) if vars is None: vars = self.stan_vars_dims.keys() self._assemble_draws() num_draws = self.num_draws_sampling meta = self._metadata.cmdstan_config attrs = { "stan_version": f"{meta['stan_version_major']}." f"{meta['stan_version_minor']}.{meta['stan_version_patch']}", "model": meta["model"], "num_unconstrained_params": self.num_unconstrained_params, "num_draws_sampling": num_draws, } if inc_warmup and self._save_warmup: num_draws += self.num_draws_warmup attrs["num_draws_warmup"] = self.num_draws_warmup data = {} coordinates = {"chain": self.chain_ids, "draw": np.arange(num_draws)} dims = ("draw", "chain") for var in vars: draw1 = 0 if not inc_warmup and self._save_warmup: draw1 = self.num_draws_warmup col_idxs = self._metadata.stan_vars_cols[var] var_dims = dims + tuple( f"{var}_dim_{i}" for i in range(len(self.stan_vars_dims[var])) ) if self.stan_vars_dims[var] == (): data[var] = ( var_dims, np.squeeze(self._draws[draw1:, :, col_idxs], axis=2), ) else: data[var] = (var_dims, self._draws[draw1:, :, col_idxs]) return xr.Dataset(data, coords=coordinates, attrs=attrs).transpose( 'chain', 'draw', ... ) def stan_variable(self, name: str, inc_warmup: bool = False) -> np.ndarray: """ Return a numpy.ndarray which contains the set of draws for the named Stan program variable. Flattens the chains, leaving the draws in chain order. The first array dimension, corresponds to number of draws or post-warmup draws in the sample, per argument ``inc_warmup``. The remaining dimensions correspond to the shape of the Stan program variable. 
Underlyingly draws are in chain order, i.e., for a sample with N chains of M draws each, the first M array elements are from chain 1, the next M are from chain 2, and the last M elements are from chain N. * If the variable is a scalar variable, the return array has shape ( draws X chains, 1). * If the variable is a vector, the return array has shape ( draws X chains, len(vector)) * If the variable is a matrix, the return array has shape ( draws X chains, size(dim 1) X size(dim 2) ) * If the variable is an array with N dimensions, the return array has shape ( draws X chains, size(dim 1) X ... X size(dim N)) For example, if the Stan program variable ``theta`` is a 3x3 matrix, and the sample consists of 4 chains with 1000 post-warmup draws, this function will return a numpy.ndarray with shape (4000,3,3). :param name: variable name :param inc_warmup: When ``True`` and the warmup draws are present in the output, i.e., the sampler was run with ``save_warmup=True``, then the warmup draws are included. Default value is ``False``. """ if name not in self._metadata.stan_vars_dims: raise ValueError('unknown name: {}'.format(name)) self._assemble_draws() draw1 = 0 if not inc_warmup and self._save_warmup: draw1 = self.num_draws_warmup num_draws = self.num_draws_sampling if inc_warmup and self._save_warmup: num_draws += self.num_draws_warmup dims = [num_draws * self.chains] col_idxs = self._metadata.stan_vars_cols[name] if len(col_idxs) > 0: dims.extend(self._metadata.stan_vars_dims[name]) # pylint: disable=redundant-keyword-arg return self._draws[draw1:, :, col_idxs].reshape(dims, order='F') def stan_variables(self) -> Dict[str, np.ndarray]: """ Return a dictionary mapping Stan program variables names to the corresponding numpy.ndarray containing the inferred values. """ result = {} for name in self._metadata.stan_vars_dims.keys(): result[name] = self.stan_variable(name) return result def method_variables(self) -> Dict: """ Returns a dictionary of all sampler variables, i.e., all output column names ending in `__`. Assumes that all variables are scalar variables where column name is variable name. Maps each column name to a numpy.ndarray (draws x chains x 1) containing per-draw diagnostic values. """ result = {} self._assemble_draws() for idxs in self.metadata.method_vars_cols.values(): for idx in idxs: result[self.column_names[idx]] = self._draws[:, :, idx] return result def sampler_variables(self) -> Dict: """ Deprecated, use "method_variables" instead """ self._logger.warning( 'method "sampler_variables" has been deprecated, ' 'use method "method_variables" instead.' ) return self.method_variables() def sampler_diagnostics(self) -> Dict: """ Deprecated, use "method_variables" instead """ self._logger.warning( 'method "sampler_diagnostics" has been deprecated, ' 'use method "method_variables" instead.' ) return self.method_variables() def save_csvfiles(self, dir: str = None) -> None: """ Move output csvfiles to specified directory. If files were written to the temporary session directory, clean filename. E.g., save 'bernoulli-201912081451-1-5nm6as7u.csv' as 'bernoulli-201912081451-1.csv'. :param dir: directory path """ self.runset.save_csvfiles(dir) class CmdStanMLE: """ Container for outputs from CmdStan optimization. 
""" def __init__(self, runset: RunSet) -> None: """Initialize object.""" if not runset.method == Method.OPTIMIZE: raise ValueError( 'Wrong runset method, expecting optimize runset, ' 'found method {}'.format(runset.method) ) self.runset = runset self._metadata = None self._column_names = () self._mle = {} self._set_mle_attrs(runset.csv_files[0]) def __repr__(self) -> str: repr = 'CmdStanMLE: model={}{}'.format( self.runset.model, self.runset._args.method_args.compose(0, cmd=[]) ) repr = '{}\n csv_file:\n\t{}\n output_file:\n\t{}'.format( repr, '\n\t'.join(self.runset.csv_files), '\n\t'.join(self.runset.stdout_files), ) # TODO - profiling files return repr def _set_mle_attrs(self, sample_csv_0: str) -> None: meta = scan_optimize_csv(sample_csv_0) self._metadata = InferenceMetadata(meta) self._column_names = meta['column_names'] self._mle = meta['mle'] @property def column_names(self) -> Tuple[str, ...]: """ Names of estimated quantities, includes joint log probability, and all parameters, transformed parameters, and generated quantitites. """ return self._column_names @property def metadata(self) -> InferenceMetadata: """ Returns object which contains CmdStan configuration as well as information about the names and structure of the inference method and model output variables. """ return self._metadata @property def optimized_params_np(self) -> np.ndarray: """Returns optimized params as numpy array.""" return np.asarray(self._mle) @property def optimized_params_pd(self) -> pd.DataFrame: """Returns optimized params as pandas DataFrame.""" return pd.DataFrame([self._mle], columns=self.column_names) @property def optimized_params_dict(self) -> OrderedDict: """Returns optimized params as Dict.""" return OrderedDict(zip(self.column_names, self._mle)) def stan_variable(self, name: str) -> np.ndarray: """ Return a numpy.ndarray which contains the estimates for the for the named Stan program variable where the dimensions of the numpy.ndarray match the shape of the Stan program variable. :param name: variable name """ if name not in self._metadata.stan_vars_dims: raise ValueError('unknown name: {}'.format(name)) col_idxs = list(self._metadata.stan_vars_cols[name]) vals = list(self._mle) xs = [vals[x] for x in col_idxs] shape = () if len(col_idxs) > 0: shape = self._metadata.stan_vars_dims[name] return np.array(xs).reshape(shape) def stan_variables(self) -> Dict[str, np.ndarray]: """ Return a dictionary mapping Stan program variables names to the corresponding numpy.ndarray containing the inferred values. """ result = {} for name in self._metadata.stan_vars_dims.keys(): result[name] = self.stan_variable(name) return result def save_csvfiles(self, dir: str = None) -> None: """ Move output csvfiles to specified directory. If files were written to the temporary session directory, clean filename. E.g., save 'bernoulli-201912081451-1-5nm6as7u.csv' as 'bernoulli-201912081451-1.csv'. :param dir: directory path """ self.runset.save_csvfiles(dir) class CmdStanGQ: """ Container for outputs from CmdStan generate_quantities run. 
""" def __init__(self, runset: RunSet, mcmc_sample: pd.DataFrame) -> None: """Initialize object.""" if not runset.method == Method.GENERATE_QUANTITIES: raise ValueError( 'Wrong runset method, expecting generate_quantities runset, ' 'found method {}'.format(runset.method) ) self.runset = runset self._metadata = None self.mcmc_sample = mcmc_sample self._generated_quantities = None self._column_names = scan_generated_quantities_csv( self.runset.csv_files[0] )['column_names'] def __repr__(self) -> str: repr = 'CmdStanGQ: model={} chains={}{}'.format( self.runset.model, self.chains, self.runset._args.method_args.compose(0, cmd=[]), ) repr = '{}\n csv_files:\n\t{}\n output_files:\n\t{}'.format( repr, '\n\t'.join(self.runset.csv_files), '\n\t'.join(self.runset.stdout_files), ) return repr @property def chains(self) -> int: """Number of chains.""" return self.runset.chains @property def column_names(self) -> Tuple[str, ...]: """ Names of generated quantities of interest. """ return self._column_names @property def generated_quantities(self) -> np.ndarray: """ A 2D numpy ndarray which contains generated quantities draws for all chains where the columns correspond to the generated quantities block variables and the rows correspond to the draws from all chains, where first M draws are the first M draws of chain 1 and the last M draws are the last M draws of chain N, i.e., flattened chain, draw ordering. """ if not self.runset.method == Method.GENERATE_QUANTITIES: raise ValueError('Bad runset method {}.'.format(self.runset.method)) if self._generated_quantities is None: self._assemble_generated_quantities() return self._generated_quantities @property def generated_quantities_pd(self) -> pd.DataFrame: """ Returns the generated quantities as a pandas DataFrame consisting of one column per quantity of interest and one row per draw. """ if not self.runset.method == Method.GENERATE_QUANTITIES: raise ValueError('Bad runset method {}.'.format(self.runset.method)) if self._generated_quantities is None: self._assemble_generated_quantities() return pd.DataFrame( data=self._generated_quantities, columns=self.column_names ) @property def sample_plus_quantities(self) -> pd.DataFrame: """ Returns the column-wise concatenation of the input drawset with generated quantities drawset. If there are duplicate columns in both the input and the generated quantities, the input column is dropped in favor of the recomputed values in the generate quantities drawset. """ if not self.runset.method == Method.GENERATE_QUANTITIES: raise ValueError('Bad runset method {}.'.format(self.runset.method)) if self._generated_quantities is None: self._assemble_generated_quantities() cols_1 = self.mcmc_sample.columns.tolist() cols_2 = self.generated_quantities_pd.columns.tolist() dups = [ item for item, count in Counter(cols_1 + cols_2).items() if count > 1 ] return pd.concat( [self.mcmc_sample.drop(columns=dups), self.generated_quantities_pd], axis=1, ) def _assemble_generated_quantities(self) -> None: drawset_list = [] for chain in range(self.chains): drawset_list.append( pd.read_csv( self.runset.csv_files[chain], comment='#', float_precision='high', dtype=float, ) ) self._generated_quantities = pd.concat(drawset_list).values def save_csvfiles(self, dir: str = None) -> None: """ Move output csvfiles to specified directory. If files were written to the temporary session directory, clean filename. E.g., save 'bernoulli-201912081451-1-5nm6as7u.csv' as 'bernoulli-201912081451-1.csv'. 
:param dir: directory path """ self.runset.save_csvfiles(dir) class CmdStanVB: """ Container for outputs from CmdStan variational run. """ def __init__(self, runset: RunSet) -> None: """Initialize object.""" if not runset.method == Method.VARIATIONAL: raise ValueError( 'Wrong runset method, expecting variational inference, ' 'found method {}'.format(runset.method) ) self.runset = runset self._metadata = None self._column_names = () self._variational_mean = {} self._variational_sample = None self._set_variational_attrs(runset.csv_files[0]) def __repr__(self) -> str: repr = 'CmdStanVB: model={}{}'.format( self.runset.model, self.runset._args.method_args.compose(0, cmd=[]) ) repr = '{}\n csv_file:\n\t{}\n output_file:\n\t{}'.format( repr, '\n\t'.join(self.runset.csv_files), '\n\t'.join(self.runset.stdout_files), ) # TODO - diagnostic, profiling files return repr def _set_variational_attrs(self, sample_csv_0: str) -> None: meta = scan_variational_csv(sample_csv_0) self._metadata = InferenceMetadata(meta) self._column_names = meta['column_names'] self._variational_mean = meta['variational_mean'] self._variational_sample = meta['variational_sample'] @property def columns(self) -> int: """ Total number of information items returned by sampler. Includes approximation information and names of model parameters and computed quantities. """ return len(self._column_names) @property def column_names(self) -> Tuple[str, ...]: """ Names of information items returned by sampler for each draw. Includes approximation information and names of model parameters and computed quantities. """ return self._column_names @property def variational_params_np(self) -> np.ndarray: """Returns inferred parameter means as numpy array.""" return self._variational_mean @property def variational_params_pd(self) -> pd.DataFrame: """Returns inferred parameter means as pandas DataFrame.""" return
pd.DataFrame([self._variational_mean], columns=self.column_names)
pandas.DataFrame
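A minimal sketch of the one-row pandas.DataFrame construction that completes the snippet above (as in variational_params_pd); the column names and mean values here are hypothetical placeholders, not output from a real run.

import pandas as pd

column_names = ('lp__', 'log_p__', 'log_g__', 'theta')  # hypothetical CSV header
variational_mean = [0.0, -7.22, -0.04, 0.25]            # hypothetical approximate posterior means
params_pd = pd.DataFrame([variational_mean], columns=column_names)
print(params_pd)  # one row, one column per reported quantity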
from __future__ import division from datetime import timedelta from functools import partial import itertools from nose.tools import assert_true from parameterized import parameterized import numpy as np from numpy.testing import assert_array_equal, assert_almost_equal import pandas as pd from toolz import merge from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor from zipline.pipeline.common import ( EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME, ) from zipline.pipeline.data import DataSet from zipline.pipeline.data import Column from zipline.pipeline.domain import EquitySessionDomain import platform if platform.system() != 'Windows': from zipline.pipeline.loaders.blaze.estimates import ( BlazeNextEstimatesLoader, BlazeNextSplitAdjustedEstimatesLoader, BlazePreviousEstimatesLoader, BlazePreviousSplitAdjustedEstimatesLoader, ) from zipline.pipeline.loaders.earnings_estimates import ( INVALID_NUM_QTRS_MESSAGE, NextEarningsEstimatesLoader, NextSplitAdjustedEarningsEstimatesLoader, normalize_quarters, PreviousEarningsEstimatesLoader, PreviousSplitAdjustedEarningsEstimatesLoader, split_normalized_quarters, ) from zipline.testing.fixtures import ( WithAdjustmentReader, WithTradingSessions, ZiplineTestCase, ) from zipline.testing.predicates import assert_equal, assert_raises_regex from zipline.testing.predicates import assert_frame_equal from zipline.utils.numpy_utils import datetime64ns_dtype from zipline.utils.numpy_utils import float64_dtype import platform import unittest class Estimates(DataSet): event_date = Column(dtype=datetime64ns_dtype) fiscal_quarter = Column(dtype=float64_dtype) fiscal_year = Column(dtype=float64_dtype) estimate = Column(dtype=float64_dtype) class MultipleColumnsEstimates(DataSet): event_date = Column(dtype=datetime64ns_dtype) fiscal_quarter = Column(dtype=float64_dtype) fiscal_year = Column(dtype=float64_dtype) estimate1 = Column(dtype=float64_dtype) estimate2 = Column(dtype=float64_dtype) def QuartersEstimates(announcements_out): class QtrEstimates(Estimates): num_announcements = announcements_out name = Estimates return QtrEstimates def MultipleColumnsQuartersEstimates(announcements_out): class QtrEstimates(MultipleColumnsEstimates): num_announcements = announcements_out name = Estimates return QtrEstimates def QuartersEstimatesNoNumQuartersAttr(num_qtr): class QtrEstimates(Estimates): name = Estimates return QtrEstimates def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date): """ Given a list of tuples of new data we get for each sid on each critical date (when information changes), create a DataFrame that fills that data through a date range ending at `end_date`. """ df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, 'estimate', 'knowledge_date']) df = df.pivot_table(columns=SID_FIELD_NAME, values='estimate', index='knowledge_date') df = df.reindex( pd.date_range(start_date, end_date) ) # Index name is lost during reindex. df.index = df.index.rename('knowledge_date') df['at_date'] = end_date.tz_localize('utc') df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill() new_sids = set(sids) - set(df.columns) df = df.reindex(columns=df.columns.union(new_sids)) return df class WithEstimates(WithTradingSessions, WithAdjustmentReader): """ ZiplineTestCase mixin providing cls.loader and cls.events as class level fixtures. Methods ------- make_loader(events, columns) -> PipelineLoader Method which returns the loader to be used throughout tests. 
events : pd.DataFrame The raw events to be used as input to the pipeline loader. columns : dict[str -> str] The dictionary mapping the names of BoundColumns to the associated column name in the events DataFrame. make_columns() -> dict[BoundColumn -> str] Method which returns a dictionary of BoundColumns mapped to the associated column names in the raw data. """ # Short window defined in order for test to run faster. START_DATE = pd.Timestamp('2014-12-28') END_DATE = pd.Timestamp('2015-02-04') @classmethod def make_loader(cls, events, columns): raise NotImplementedError('make_loader') @classmethod def make_events(cls): raise NotImplementedError('make_events') @classmethod def get_sids(cls): return cls.events[SID_FIELD_NAME].unique() @classmethod def make_columns(cls): return { Estimates.event_date: 'event_date', Estimates.fiscal_quarter: 'fiscal_quarter', Estimates.fiscal_year: 'fiscal_year', Estimates.estimate: 'estimate' } def make_engine(self, loader=None): if loader is None: loader = self.loader return SimplePipelineEngine( lambda x: loader, self.asset_finder, default_domain=EquitySessionDomain( self.trading_days, self.ASSET_FINDER_COUNTRY_CODE, ), ) @classmethod def init_class_fixtures(cls): cls.events = cls.make_events() cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids() cls.ASSET_FINDER_EQUITY_SYMBOLS = [ 's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS ] # We need to instantiate certain constants needed by supers of # `WithEstimates` before we call their `init_class_fixtures`. super(WithEstimates, cls).init_class_fixtures() cls.columns = cls.make_columns() # Some tests require `WithAdjustmentReader` to be set up by the time we # make the loader. cls.loader = cls.make_loader(cls.events, {column.name: val for column, val in cls.columns.items()}) class WithOneDayPipeline(WithEstimates): """ ZiplineTestCase mixin providing cls.events as a class level fixture and defining a test for all inheritors to use. Attributes ---------- events : pd.DataFrame A simple DataFrame with columns needed for estimates and a single sid and no other data. Tests ------ test_wrong_num_announcements_passed() Tests that loading with an incorrect quarter number raises an error. test_no_num_announcements_attr() Tests that the loader throws an AssertionError if the dataset being loaded has no `num_announcements` attribute. 
""" @classmethod def make_columns(cls): return { MultipleColumnsEstimates.event_date: 'event_date', MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter', MultipleColumnsEstimates.fiscal_year: 'fiscal_year', MultipleColumnsEstimates.estimate1: 'estimate1', MultipleColumnsEstimates.estimate2: 'estimate2' } @classmethod def make_events(cls): return pd.DataFrame({ SID_FIELD_NAME: [0] * 2, TS_FIELD_NAME: [pd.Timestamp('2015-01-01'), pd.Timestamp('2015-01-06')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'), pd.Timestamp('2015-01-20')], 'estimate1': [1., 2.], 'estimate2': [3., 4.], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: [2015, 2015] }) @classmethod def make_expected_out(cls): raise NotImplementedError('make_expected_out') @classmethod def init_class_fixtures(cls): super(WithOneDayPipeline, cls).init_class_fixtures() cls.sid0 = cls.asset_finder.retrieve_asset(0) cls.expected_out = cls.make_expected_out() def test_load_one_day(self): # We want to test multiple columns dataset = MultipleColumnsQuartersEstimates(1) engine = self.make_engine() results = engine.run_pipeline( Pipeline({c.name: c.latest for c in dataset.columns}), start_date=pd.Timestamp('2015-01-15', tz='utc'), end_date=pd.Timestamp('2015-01-15', tz='utc'), ) assert_frame_equal(results, self.expected_out) class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase): """ Tests that previous quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @classmethod def make_expected_out(cls): return pd.DataFrame( { EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'), 'estimate1': 1., 'estimate2': 3., FISCAL_QUARTER_FIELD_NAME: 1., FISCAL_YEAR_FIELD_NAME: 2015., }, index=pd.MultiIndex.from_tuples( ((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),) ) ) class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase): """ Tests that next quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @classmethod def make_expected_out(cls): return pd.DataFrame( { EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'), 'estimate1': 2., 'estimate2': 4., FISCAL_QUARTER_FIELD_NAME: 2., FISCAL_YEAR_FIELD_NAME: 2015., }, index=pd.MultiIndex.from_tuples( ((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),) ) ) dummy_df = pd.DataFrame({SID_FIELD_NAME: 0}, columns=[SID_FIELD_NAME, TS_FIELD_NAME, EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, 'estimate'], index=[0]) class WithWrongLoaderDefinition(WithEstimates): """ ZiplineTestCase mixin providing cls.events as a class level fixture and defining a test for all inheritors to use. Attributes ---------- events : pd.DataFrame A simple DataFrame with columns needed for estimates and a single sid and no other data. Tests ------ test_wrong_num_announcements_passed() Tests that loading with an incorrect quarter number raises an error. test_no_num_announcements_attr() Tests that the loader throws an AssertionError if the dataset being loaded has no `num_announcements` attribute. 
""" @classmethod def make_events(cls): return dummy_df def test_wrong_num_announcements_passed(self): bad_dataset1 = QuartersEstimates(-1) bad_dataset2 = QuartersEstimates(-2) good_dataset = QuartersEstimates(1) engine = self.make_engine() columns = {c.name + str(dataset.num_announcements): c.latest for dataset in (bad_dataset1, bad_dataset2, good_dataset) for c in dataset.columns} p = Pipeline(columns) with self.assertRaises(ValueError) as e: engine.run_pipeline( p, start_date=self.trading_days[0], end_date=self.trading_days[-1], ) assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2") def test_no_num_announcements_attr(self): dataset = QuartersEstimatesNoNumQuartersAttr(1) engine = self.make_engine() p = Pipeline({c.name: c.latest for c in dataset.columns}) with self.assertRaises(AttributeError): engine.run_pipeline( p, start_date=self.trading_days[0], end_date=self.trading_days[-1], ) class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase): """ Tests that previous quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase): """ Tests that next quarter loader correctly breaks if an incorrect number of quarters is passed. """ @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) options = ["split_adjustments_loader", "split_adjusted_column_names", "split_adjusted_asof"] class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase): """ Test class that tests that loaders break correctly when incorrectly instantiated. Tests ----- test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader) A test that checks that the loader correctly breaks when an unexpected column is passed in the list of split-adjusted columns. """ @classmethod def init_class_fixtures(cls): super(WithEstimates, cls).init_class_fixtures() @parameterized.expand(itertools.product( (NextSplitAdjustedEarningsEstimatesLoader, PreviousSplitAdjustedEarningsEstimatesLoader), )) def test_extra_splits_columns_passed(self, loader): columns = { Estimates.event_date: 'event_date', Estimates.fiscal_quarter: 'fiscal_quarter', Estimates.fiscal_year: 'fiscal_year', Estimates.estimate: 'estimate' } with self.assertRaises(ValueError): loader(dummy_df, {column.name: val for column, val in columns.items()}, split_adjustments_loader=self.adjustment_reader, split_adjusted_column_names=["estimate", "extra_col"], split_adjusted_asof=pd.Timestamp("2015-01-01")) class WithEstimatesTimeZero(WithEstimates): """ ZiplineTestCase mixin providing cls.events as a class level fixture and defining a test for all inheritors to use. Attributes ---------- cls.events : pd.DataFrame Generated dynamically in order to test inter-leavings of estimates and event dates for multiple quarters to make sure that we select the right immediate 'next' or 'previous' quarter relative to each date - i.e., the right 'time zero' on the timeline. We care about selecting the right 'time zero' because we use that to calculate which quarter's data needs to be returned for each day. Methods ------- get_expected_estimate(q1_knowledge, q2_knowledge, comparable_date) -> pd.DataFrame Retrieves the expected estimate given the latest knowledge about each quarter and the date on which the estimate is being requested. If there is no expected estimate, returns an empty DataFrame. 
Tests ------ test_estimates() Tests that we get the right 'time zero' value on each day for each sid and for each column. """ # Shorter date range for performance END_DATE = pd.Timestamp('2015-01-28') q1_knowledge_dates = [pd.Timestamp('2015-01-01'), pd.Timestamp('2015-01-04'), pd.Timestamp('2015-01-07'), pd.Timestamp('2015-01-11')] q2_knowledge_dates = [pd.Timestamp('2015-01-14'), pd.Timestamp('2015-01-17'), pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-23')] # We want to model the possibility of an estimate predicting a release date # that doesn't match the actual release. This could be done by dynamically # generating more combinations with different release dates, but that # significantly increases the amount of time it takes to run the tests. # These hard-coded cases are sufficient to know that we can update our # beliefs when we get new information. q1_release_dates = [pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-14')] # One day late q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early pd.Timestamp('2015-01-26')] @classmethod def make_events(cls): """ In order to determine which estimate we care about for a particular sid, we need to look at all estimates that we have for that sid and their associated event dates. We define q1 < q2, and thus event1 < event2 since event1 occurs during q1 and event2 occurs during q2 and we assume that there can only be 1 event per quarter. We assume that there can be multiple estimates per quarter leading up to the event. We assume that estimates will not surpass the relevant event date. We will look at 2 estimates for an event before the event occurs, since that is the simplest scenario that covers the interesting edge cases: - estimate values changing - a release date changing - estimates for different quarters interleaving Thus, we generate all possible inter-leavings of 2 estimates per quarter-event where estimate1 < estimate2 and all estimates are < the relevant event and assign each of these inter-leavings to a different sid. """ sid_estimates = [] sid_releases = [] # We want all permutations of 2 knowledge dates per quarter. it = enumerate( itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4) ) for sid, (q1e1, q1e2, q2e1, q2e2) in it: # We're assuming that estimates must come before the relevant # release. if (q1e1 < q1e2 and q2e1 < q2e2 and # All estimates are < Q2's event, so just constrain Q1 # estimates. q1e1 < cls.q1_release_dates[0] and q1e2 < cls.q1_release_dates[0]): sid_estimates.append(cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)) sid_releases.append(cls.create_releases_df(sid)) return pd.concat(sid_estimates + sid_releases).reset_index(drop=True) @classmethod def get_sids(cls): sids = cls.events[SID_FIELD_NAME].unique() # Tack on an extra sid to make sure that sids with no data are # included but have all-null columns. return list(sids) + [max(sids) + 1] @classmethod def create_releases_df(cls, sid): # Final release dates never change. The quarters have very tight date # ranges in order to reduce the number of dates we need to iterate # through when testing. 
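        # each release row uses the release date itself for both the timestamp
        # (knowledge date) and the event date: the actual numbers become known
        # on the day of the release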
return pd.DataFrame({ TS_FIELD_NAME: [pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-26')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-26')], 'estimate': [0.5, 0.8], FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0], FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0], SID_FIELD_NAME: sid }) @classmethod def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid): return pd.DataFrame({ EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates, 'estimate': [.1, .2, .3, .4], FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0], FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0], TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2], SID_FIELD_NAME: sid, }) def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date): return pd.DataFrame() def test_estimates(self): dataset = QuartersEstimates(1) engine = self.make_engine() results = engine.run_pipeline( Pipeline({c.name: c.latest for c in dataset.columns}), start_date=self.trading_days[1], end_date=self.trading_days[-2], ) for sid in self.ASSET_FINDER_EQUITY_SIDS: sid_estimates = results.xs(sid, level=1) # Separate assertion for all-null DataFrame to avoid setting # column dtypes on `all_expected`. if sid == max(self.ASSET_FINDER_EQUITY_SIDS): assert_true(sid_estimates.isnull().all().all()) else: ts_sorted_estimates = self.events[ self.events[SID_FIELD_NAME] == sid ].sort_values(TS_FIELD_NAME) q1_knowledge = ts_sorted_estimates[ ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1 ] q2_knowledge = ts_sorted_estimates[ ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2 ] all_expected = pd.concat( [self.get_expected_estimate( q1_knowledge[q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)], q2_knowledge[q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)], date.tz_localize(None), ).set_index([[date]]) for date in sid_estimates.index], axis=0) assert_equal(all_expected[sid_estimates.columns], sid_estimates) class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date): # If our latest knowledge of q1 is that the release is # happening on this simulation date or later, then that's # the estimate we want to use. if (not q1_knowledge.empty and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date): return q1_knowledge.iloc[-1:] # If q1 has already happened or we don't know about it # yet and our latest knowledge indicates that q2 hasn't # happened yet, then that's the estimate we want to use. elif (not q2_knowledge.empty and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date): return q2_knowledge.iloc[-1:] return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date]) @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazeNextEstimateLoaderTestCase(NextEstimate): """ Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader. 
""" @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazeNextEstimatesLoader( bz.data(events), columns, ) @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date): # The expected estimate will be for q2 if the last thing # we've seen is that the release date already happened. # Otherwise, it'll be for q1, as long as the release date # for q1 has already happened. if (not q2_knowledge.empty and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date): return q2_knowledge.iloc[-1:] elif (not q1_knowledge.empty and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date): return q1_knowledge.iloc[-1:] return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date]) @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazePreviousEstimateLoaderTestCase(PreviousEstimate): """ Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader. """ @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazePreviousEstimatesLoader( bz.data(events), columns, ) @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class WithEstimateMultipleQuarters(WithEstimates): """ ZiplineTestCase mixin providing cls.events, cls.make_expected_out as class-level fixtures and self.test_multiple_qtrs_requested as a test. Attributes ---------- events : pd.DataFrame Simple DataFrame with estimates for 2 quarters for a single sid. Methods ------- make_expected_out() --> pd.DataFrame Returns the DataFrame that is expected as a result of running a Pipeline where estimates are requested for multiple quarters out. fill_expected_out(expected) Fills the expected DataFrame with data. Tests ------ test_multiple_qtrs_requested() Runs a Pipeline that calculate which estimates for multiple quarters out and checks that the returned columns contain data for the correct number of quarters out. 
""" @classmethod def make_events(cls): return pd.DataFrame({ SID_FIELD_NAME: [0] * 2, TS_FIELD_NAME: [pd.Timestamp('2015-01-01'), pd.Timestamp('2015-01-06')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'), pd.Timestamp('2015-01-20')], 'estimate': [1., 2.], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: [2015, 2015] }) @classmethod def init_class_fixtures(cls): super(WithEstimateMultipleQuarters, cls).init_class_fixtures() cls.expected_out = cls.make_expected_out() @classmethod def make_expected_out(cls): expected = pd.DataFrame(columns=[cls.columns[col] + '1' for col in cls.columns] + [cls.columns[col] + '2' for col in cls.columns], index=cls.trading_days) for (col, raw_name), suffix in itertools.product( cls.columns.items(), ('1', '2') ): expected_name = raw_name + suffix if col.dtype == datetime64ns_dtype: expected[expected_name] = pd.to_datetime( expected[expected_name] ) else: expected[expected_name] = expected[ expected_name ].astype(col.dtype) cls.fill_expected_out(expected) return expected.reindex(cls.trading_days) def test_multiple_qtrs_requested(self): dataset1 = QuartersEstimates(1) dataset2 = QuartersEstimates(2) engine = self.make_engine() results = engine.run_pipeline( Pipeline( merge([{c.name + '1': c.latest for c in dataset1.columns}, {c.name + '2': c.latest for c in dataset2.columns}]) ), start_date=self.trading_days[0], end_date=self.trading_days[-1], ) q1_columns = [col.name + '1' for col in self.columns] q2_columns = [col.name + '2' for col in self.columns] # We now expect a column for 1 quarter out and a column for 2 # quarters out for each of the dataset columns. assert_equal(sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)) assert_equal(self.expected_out.sort_index(axis=1), results.xs(0, level=1).sort_index(axis=1)) class NextEstimateMultipleQuarters( WithEstimateMultipleQuarters, ZiplineTestCase ): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @classmethod def fill_expected_out(cls, expected): # Fill columns for 1 Q out for raw_name in cls.columns.values(): expected.loc[ pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'), raw_name + '1' ] = cls.events[raw_name].iloc[0] expected.loc[ pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'), raw_name + '1' ] = cls.events[raw_name].iloc[1] # Fill columns for 2 Q out # We only have an estimate and event date for 2 quarters out before # Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs # out. 
for col_name in ['estimate', 'event_date']: expected.loc[ pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'), col_name + '2' ] = cls.events[col_name].iloc[1] # But we know what FQ and FY we'd need in both Q1 and Q2 # because we know which FQ is next and can calculate from there expected.loc[ pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'), FISCAL_QUARTER_FIELD_NAME + '2' ] = 2 expected.loc[ pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'), FISCAL_QUARTER_FIELD_NAME + '2' ] = 3 expected.loc[ pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'), FISCAL_YEAR_FIELD_NAME + '2' ] = 2015 return expected @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazeNextEstimatesLoader( bz.data(events), columns, ) class PreviousEstimateMultipleQuarters( WithEstimateMultipleQuarters, ZiplineTestCase ): @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @classmethod def fill_expected_out(cls, expected): # Fill columns for 1 Q out for raw_name in cls.columns.values(): expected[raw_name + '1'].loc[ pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19') ] = cls.events[raw_name].iloc[0] expected[raw_name + '1'].loc[ pd.Timestamp('2015-01-20'): ] = cls.events[raw_name].iloc[1] # Fill columns for 2 Q out for col_name in ['estimate', 'event_date']: expected[col_name + '2'].loc[ pd.Timestamp('2015-01-20'): ] = cls.events[col_name].iloc[0] expected[ FISCAL_QUARTER_FIELD_NAME + '2' ].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4 expected[ FISCAL_YEAR_FIELD_NAME + '2' ].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014 expected[ FISCAL_QUARTER_FIELD_NAME + '2' ].loc[pd.Timestamp('2015-01-20'):] = 1 expected[ FISCAL_YEAR_FIELD_NAME + '2' ].loc[pd.Timestamp('2015-01-20'):] = 2015 return expected @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazePreviousEstimatesLoader( bz.data(events), columns, ) class WithVaryingNumEstimates(WithEstimates): """ ZiplineTestCase mixin providing fixtures and a test to ensure that we have the correct overwrites when the event date changes. We want to make sure that if we have a quarter with an event date that gets pushed back, we don't start overwriting for the next quarter early. Likewise, if we have a quarter with an event date that gets pushed forward, we want to make sure that we start applying adjustments at the appropriate, earlier date, rather than the later date. Methods ------- assert_compute() Defines how to determine that results computed for the `SomeFactor` factor are correct. Tests ----- test_windows_with_varying_num_estimates() Tests that we create the correct overwrites from 2015-01-13 to 2015-01-14 regardless of how event dates were updated for each quarter for each sid. 
""" @classmethod def make_events(cls): return pd.DataFrame({ SID_FIELD_NAME: [0] * 3 + [1] * 3, TS_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-12'), pd.Timestamp('2015-01-13')] * 2, EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'), pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-12'), pd.Timestamp('2015-01-20')], 'estimate': [11., 12., 21.] * 2, FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2, FISCAL_YEAR_FIELD_NAME: [2015] * 6 }) @classmethod def assert_compute(cls, estimate, today): raise NotImplementedError('assert_compute') def test_windows_with_varying_num_estimates(self): dataset = QuartersEstimates(1) assert_compute = self.assert_compute class SomeFactor(CustomFactor): inputs = [dataset.estimate] window_length = 3 def compute(self, today, assets, out, estimate): assert_compute(estimate, today) engine = self.make_engine() engine.run_pipeline( Pipeline({'est': SomeFactor()}), start_date=pd.Timestamp('2015-01-13', tz='utc'), # last event date we have end_date=pd.Timestamp('2015-01-14', tz='utc'), ) class PreviousVaryingNumEstimates( WithVaryingNumEstimates, ZiplineTestCase ): def assert_compute(self, estimate, today): if today == pd.Timestamp('2015-01-13', tz='utc'): assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12])) assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12])) else: assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12])) assert_array_equal(estimate[:, 1], np.array([12, 12, 12])) @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazePreviousEstimatesLoader( bz.data(events), columns, ) class NextVaryingNumEstimates( WithVaryingNumEstimates, ZiplineTestCase ): def assert_compute(self, estimate, today): if today == pd.Timestamp('2015-01-13', tz='utc'): assert_array_equal(estimate[:, 0], np.array([11, 12, 12])) assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21])) else: assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21])) assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21])) @classmethod def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazeNextEstimatesLoader( bz.data(events), columns, ) class WithEstimateWindows(WithEstimates): """ ZiplineTestCase mixin providing fixures and a test to test running a Pipeline with an estimates loader over differently-sized windows. Attributes ---------- events : pd.DataFrame DataFrame with estimates for 2 quarters for 2 sids. window_test_start_date : pd.Timestamp The date from which the window should start. timelines : dict[int -> pd.DataFrame] A dictionary mapping to the number of quarters out to snapshots of how the data should look on each date in the date range. Methods ------- make_expected_timelines() -> dict[int -> pd.DataFrame] Creates a dictionary of expected data. See `timelines`, above. 
Tests ----- test_estimate_windows_at_quarter_boundaries() Tests that we overwrite values with the correct quarter's estimate at the correct dates when we have a factor that asks for a window of data. """ END_DATE = pd.Timestamp('2015-02-10') window_test_start_date = pd.Timestamp('2015-01-05') critical_dates = [pd.Timestamp('2015-01-09', tz='utc'), pd.Timestamp('2015-01-15', tz='utc'), pd.Timestamp('2015-01-20', tz='utc'), pd.Timestamp('2015-01-26', tz='utc'), pd.Timestamp('2015-02-05', tz='utc'), pd.Timestamp('2015-02-10', tz='utc')] # Starting date, number of announcements out. window_test_cases = list(itertools.product(critical_dates, (1, 2))) @classmethod def make_events(cls): # Typical case: 2 consecutive quarters. sid_0_timeline = pd.DataFrame({ TS_FIELD_NAME: [cls.window_test_start_date, pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-12'), pd.Timestamp('2015-02-10'), # We want a case where we get info for a later # quarter before the current quarter is over but # after the split_asof_date to make sure that # we choose the correct date to overwrite until. pd.Timestamp('2015-01-18')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-20'), pd.Timestamp('2015-02-10'), pd.Timestamp('2015-02-10'), pd.Timestamp('2015-04-01')], 'estimate': [100., 101.] + [200., 201.] + [400], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 0, }) # We want a case where we skip a quarter. We never find out about Q2. sid_10_timeline = pd.DataFrame({ TS_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-12'), pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-15')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'), pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')], 'estimate': [110., 111.] + [310., 311.], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2, FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 10 }) # We want to make sure we have correct overwrites when sid quarter # boundaries collide. This sid's quarter boundaries collide with sid 0. sid_20_timeline = pd.DataFrame({ TS_FIELD_NAME: [cls.window_test_start_date, pd.Timestamp('2015-01-07'), cls.window_test_start_date, pd.Timestamp('2015-01-17')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-20'), pd.Timestamp('2015-02-10'), pd.Timestamp('2015-02-10')], 'estimate': [120., 121.] + [220., 221.], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2, FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 20 }) concatted = pd.concat([sid_0_timeline, sid_10_timeline, sid_20_timeline]).reset_index() np.random.seed(0) return concatted.reindex(np.random.permutation(concatted.index)) @classmethod def get_sids(cls): sids = sorted(cls.events[SID_FIELD_NAME].unique()) # Add extra sids between sids in our data. We want to test that we # apply adjustments to the correct sids. 
return [sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i+1])] + [sids[-1]] @classmethod def make_expected_timelines(cls): return {} @classmethod def init_class_fixtures(cls): super(WithEstimateWindows, cls).init_class_fixtures() cls.create_expected_df_for_factor_compute = partial( create_expected_df_for_factor_compute, cls.window_test_start_date, cls.get_sids() ) cls.timelines = cls.make_expected_timelines() @parameterized.expand(window_test_cases) def test_estimate_windows_at_quarter_boundaries(self, start_date, num_announcements_out): dataset = QuartersEstimates(num_announcements_out) trading_days = self.trading_days timelines = self.timelines # The window length should be from the starting index back to the first # date on which we got data. The goal is to ensure that as we # progress through the timeline, all data we got, starting from that # first date, is correctly overwritten. window_len = ( self.trading_days.get_loc(start_date) - self.trading_days.get_loc(self.window_test_start_date) + 1 ) class SomeFactor(CustomFactor): inputs = [dataset.estimate] window_length = window_len def compute(self, today, assets, out, estimate): today_idx = trading_days.get_loc(today) today_timeline = timelines[ num_announcements_out ].loc[today].reindex( trading_days[:today_idx + 1] ).values timeline_start_idx = (len(today_timeline) - window_len) assert_almost_equal(estimate, today_timeline[timeline_start_idx:]) engine = self.make_engine() engine.run_pipeline( Pipeline({'est': SomeFactor()}), start_date=start_date, # last event date we have end_date=pd.Timestamp('2015-02-10', tz='utc'), ) class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return PreviousEarningsEstimatesLoader(events, columns) @classmethod def make_expected_timelines(cls): oneq_previous = pd.concat([ pd.concat([ cls.create_expected_df_for_factor_compute([ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date) ], end_date) for end_date in pd.date_range('2015-01-09', '2015-01-19') ]), cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-01-20')), (10, np.NaN, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-20'))], pd.Timestamp('2015-01-20') ), cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-01-20')), (10, np.NaN, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-20'))], pd.Timestamp('2015-01-21') ), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-01-20')), (10, 111, pd.Timestamp('2015-01-22')), (20, 121, pd.Timestamp('2015-01-20'))], end_date ) for end_date in pd.date_range('2015-01-22', '2015-02-04') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-01-20')), (10, 311, pd.Timestamp('2015-02-05')), (20, 121, pd.Timestamp('2015-01-20'))], end_date ) for end_date in pd.date_range('2015-02-05', '2015-02-09') ]), cls.create_expected_df_for_factor_compute( [(0, 201, pd.Timestamp('2015-02-10')), (10, 311, pd.Timestamp('2015-02-05')), (20, 221, pd.Timestamp('2015-02-10'))], pd.Timestamp('2015-02-10') ), ]) twoq_previous = pd.concat( [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-09', '2015-02-09')] + # We never get estimates for S1 for 2Q ago because once Q3 # becomes our previous 
quarter, 2Q ago would be Q2, and we have # no data on it. [cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-02-10')), (10, np.NaN, pd.Timestamp('2015-02-05')), (20, 121, pd.Timestamp('2015-02-10'))], pd.Timestamp('2015-02-10') )] ) return { 1: oneq_previous, 2: twoq_previous } @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazePreviousEstimateWindows(PreviousEstimateWindows): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazePreviousEstimatesLoader(bz.data(events), columns) class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): return NextEarningsEstimatesLoader(events, columns) @classmethod def make_expected_timelines(cls): oneq_next = pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-07'))], pd.Timestamp('2015-01-09') ), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-07'))], end_date ) for end_date in pd.date_range('2015-01-12', '2015-01-19') ]), cls.create_expected_df_for_factor_compute( [(0, 100, cls.window_test_start_date), (0, 101, pd.Timestamp('2015-01-20')), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-07'))], pd.Timestamp('2015-01-20') ), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp('2015-01-17'))], end_date ) for end_date in pd.date_range('2015-01-21', '2015-01-22') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (10, 310, pd.Timestamp('2015-01-09')), (10, 311, pd.Timestamp('2015-01-15')), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp('2015-01-17'))], end_date ) for end_date in pd.date_range('2015-01-23', '2015-02-05') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp('2015-01-17'))], end_date ) for end_date in pd.date_range('2015-02-06', '2015-02-09') ]), cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (0, 201, pd.Timestamp('2015-02-10')), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp('2015-01-17'))], pd.Timestamp('2015-02-10') ) ]) twoq_next = pd.concat( [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-09', '2015-01-11')] + [cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-12', '2015-01-16')] + 
[cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (20, 221, pd.Timestamp('2015-01-17'))], pd.Timestamp('2015-01-20') )] + [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-21', '2015-02-10')] ) return { 1: oneq_next, 2: twoq_next } @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazeNextEstimateWindows(NextEstimateWindows): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazeNextEstimatesLoader(bz.data(events), columns) class WithSplitAdjustedWindows(WithEstimateWindows): """ ZiplineTestCase mixin providing fixures and a test to test running a Pipeline with an estimates loader over differently-sized windows and with split adjustments. """ split_adjusted_asof_date = pd.Timestamp('2015-01-14') @classmethod def make_events(cls): # Add an extra sid that has a release before the split-asof-date in # order to test that we're reversing splits correctly in the previous # case (without an overwrite) and in the next case (with an overwrite). sid_30 = pd.DataFrame({ TS_FIELD_NAME: [cls.window_test_start_date, pd.Timestamp('2015-01-09'), # For Q2, we want it to start early enough # that we can have several adjustments before # the end of the first quarter so that we # can test un-adjusting & readjusting with an # overwrite. cls.window_test_start_date, # We want the Q2 event date to be enough past # the split-asof-date that we can have # several splits and can make sure that they # are applied correctly. pd.Timestamp('2015-01-20')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-20')], 'estimate': [130., 131., 230., 231.], FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2, FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 30 }) # An extra sid to test no splits before the split-adjusted-asof-date. # We want an event before and after the split-adjusted-asof-date & # timestamps for data points also before and after # split-adjsuted-asof-date (but also before the split dates, so that # we can test that splits actually get applied at the correct times). sid_40 = pd.DataFrame({ TS_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-15')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-02-10')], 'estimate': [140., 240.], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 40 }) # An extra sid to test all splits before the # split-adjusted-asof-date. All timestamps should be before that date # so that we have cases where we un-apply and re-apply splits. sid_50 = pd.DataFrame({ TS_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-12')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-02-10')], 'estimate': [150., 250.], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 50 }) return pd.concat([ # Slightly hacky, but want to make sure we're using the same # events as WithEstimateWindows. 
cls.__base__.make_events(), sid_30, sid_40, sid_50, ]) @classmethod def make_splits_data(cls): # For sid 0, we want to apply a series of splits before and after the # split-adjusted-asof-date we well as between quarters (for the # previous case, where we won't see any values until after the event # happens). sid_0_splits = pd.DataFrame({ SID_FIELD_NAME: 0, 'ratio': (-1., 2., 3., 4., 5., 6., 7., 100), 'effective_date': (pd.Timestamp('2014-01-01'), # Filter out # Split before Q1 event & after first estimate pd.Timestamp('2015-01-07'), # Split before Q1 event pd.Timestamp('2015-01-09'), # Split before Q1 event pd.Timestamp('2015-01-13'), # Split before Q1 event pd.Timestamp('2015-01-15'), # Split before Q1 event pd.Timestamp('2015-01-18'), # Split after Q1 event and before Q2 event pd.Timestamp('2015-01-30'), # Filter out - this is after our date index pd.Timestamp('2016-01-01')) }) sid_10_splits = pd.DataFrame({ SID_FIELD_NAME: 10, 'ratio': (.2, .3), 'effective_date': ( # We want a split before the first estimate and before the # split-adjusted-asof-date but within our calendar index so # that we can test that the split is NEVER applied. pd.Timestamp('2015-01-07'), # Apply a single split before Q1 event. pd.Timestamp('2015-01-20')), }) # We want a sid with split dates that collide with another sid (0) to # make sure splits are correctly applied for both sids. sid_20_splits = pd.DataFrame({ SID_FIELD_NAME: 20, 'ratio': (.4, .5, .6, .7, .8, .9,), 'effective_date': ( pd.Timestamp('2015-01-07'), pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-15'), pd.Timestamp('2015-01-18'), pd.Timestamp('2015-01-30')), }) # This sid has event dates that are shifted back so that we can test # cases where an event occurs before the split-asof-date. sid_30_splits = pd.DataFrame({ SID_FIELD_NAME: 30, 'ratio': (8, 9, 10, 11, 12), 'effective_date': ( # Split before the event and before the # split-asof-date. pd.Timestamp('2015-01-07'), # Split on date of event but before the # split-asof-date. pd.Timestamp('2015-01-09'), # Split after the event, but before the # split-asof-date. pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-15'), pd.Timestamp('2015-01-18')), }) # No splits for a sid before the split-adjusted-asof-date. sid_40_splits = pd.DataFrame({ SID_FIELD_NAME: 40, 'ratio': (13, 14), 'effective_date': ( pd.Timestamp('2015-01-20'), pd.Timestamp('2015-01-22') ) }) # No splits for a sid after the split-adjusted-asof-date. sid_50_splits = pd.DataFrame({ SID_FIELD_NAME: 50, 'ratio': (15, 16), 'effective_date': ( pd.Timestamp('2015-01-13'), pd.Timestamp('2015-01-14') ) }) return pd.concat([ sid_0_splits, sid_10_splits, sid_20_splits, sid_30_splits, sid_40_splits, sid_50_splits, ]) class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return PreviousSplitAdjustedEarningsEstimatesLoader( events, columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate'], split_adjusted_asof=cls.split_adjusted_asof_date, ) @classmethod def make_expected_timelines(cls): oneq_previous = pd.concat([ pd.concat([ cls.create_expected_df_for_factor_compute([ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), # Undo all adjustments that haven't happened yet. 
(30, 131*1/10, pd.Timestamp('2015-01-09')), (40, 140., pd.Timestamp('2015-01-09')), (50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')), ], end_date) for end_date in pd.date_range('2015-01-09', '2015-01-12') ]), cls.create_expected_df_for_factor_compute([ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131, pd.Timestamp('2015-01-09')), (40, 140., pd.Timestamp('2015-01-09')), (50, 150. * 1 / 16, pd.Timestamp('2015-01-09')), ], pd.Timestamp('2015-01-13')), cls.create_expected_df_for_factor_compute([ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131, pd.Timestamp('2015-01-09')), (40, 140., pd.Timestamp('2015-01-09')), (50, 150., pd.Timestamp('2015-01-09')) ], pd.Timestamp('2015-01-14')), pd.concat([ cls.create_expected_df_for_factor_compute([ (0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, 131*11, pd.Timestamp('2015-01-09')), (40, 140., pd.Timestamp('2015-01-09')), (50, 150., pd.Timestamp('2015-01-09')), ], end_date) for end_date in pd.date_range('2015-01-15', '2015-01-16') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-01-20')), (10, np.NaN, cls.window_test_start_date), (20, 121*.7*.8, pd.Timestamp('2015-01-20')), (30, 231, pd.Timestamp('2015-01-20')), (40, 140.*13, pd.Timestamp('2015-01-09')), (50, 150., pd.Timestamp('2015-01-09'))], end_date ) for end_date in pd.date_range('2015-01-20', '2015-01-21') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 101, pd.Timestamp('2015-01-20')), (10, 111*.3, pd.Timestamp('2015-01-22')), (20, 121*.7*.8, pd.Timestamp('2015-01-20')), (30, 231, pd.Timestamp('2015-01-20')), (40, 140.*13*14, pd.Timestamp('2015-01-09')), (50, 150., pd.Timestamp('2015-01-09'))], end_date ) for end_date in pd.date_range('2015-01-22', '2015-01-29') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 101*7, pd.Timestamp('2015-01-20')), (10, 111*.3, pd.Timestamp('2015-01-22')), (20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')), (30, 231, pd.Timestamp('2015-01-20')), (40, 140.*13*14, pd.Timestamp('2015-01-09')), (50, 150., pd.Timestamp('2015-01-09'))], end_date ) for end_date in pd.date_range('2015-01-30', '2015-02-04') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 101*7, pd.Timestamp('2015-01-20')), (10, 311*.3, pd.Timestamp('2015-02-05')), (20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')), (30, 231, pd.Timestamp('2015-01-20')), (40, 140.*13*14, pd.Timestamp('2015-01-09')), (50, 150., pd.Timestamp('2015-01-09'))], end_date ) for end_date in pd.date_range('2015-02-05', '2015-02-09') ]), cls.create_expected_df_for_factor_compute( [(0, 201, pd.Timestamp('2015-02-10')), (10, 311*.3, pd.Timestamp('2015-02-05')), (20, 221*.8*.9, pd.Timestamp('2015-02-10')), (30, 231, pd.Timestamp('2015-01-20')), (40, 240.*13*14, pd.Timestamp('2015-02-10')), (50, 250., pd.Timestamp('2015-02-10'))], pd.Timestamp('2015-02-10') ), ]) twoq_previous = pd.concat( [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, np.NaN, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-09', '2015-01-19')] + [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, 
cls.window_test_start_date), (30, 131*11*12, pd.Timestamp('2015-01-20'))], end_date ) for end_date in pd.date_range('2015-01-20', '2015-02-09')] + # We never get estimates for S1 for 2Q ago because once Q3 # becomes our previous quarter, 2Q ago would be Q2, and we have # no data on it. [cls.create_expected_df_for_factor_compute( [(0, 101*7, pd.Timestamp('2015-02-10')), (10, np.NaN, pd.Timestamp('2015-02-05')), (20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')), (30, 131*11*12, pd.Timestamp('2015-01-20')), (40, 140. * 13 * 14, pd.Timestamp('2015-02-10')), (50, 150., pd.Timestamp('2015-02-10'))], pd.Timestamp('2015-02-10') )] ) return { 1: oneq_previous, 2: twoq_previous } @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazePreviousSplitAdjustedEstimatesLoader( bz.data(events), columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate'], split_adjusted_asof=cls.split_adjusted_asof_date, ) class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase): @classmethod def make_loader(cls, events, columns): return NextSplitAdjustedEarningsEstimatesLoader( events, columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate'], split_adjusted_asof=cls.split_adjusted_asof_date, ) @classmethod def make_expected_timelines(cls): oneq_next = pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 100*1/4, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (20, 120*5/3, cls.window_test_start_date), (20, 121*5/3, pd.Timestamp('2015-01-07')), (30, 130*1/10, cls.window_test_start_date), (30, 131*1/10, pd.Timestamp('2015-01-09')), (40, 140, pd.Timestamp('2015-01-09')), (50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))], pd.Timestamp('2015-01-09') ), cls.create_expected_df_for_factor_compute( [(0, 100*1/4, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 120*5/3, cls.window_test_start_date), (20, 121*5/3, pd.Timestamp('2015-01-07')), (30, 230*1/10, cls.window_test_start_date), (40, np.NaN, pd.Timestamp('2015-01-10')), (50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-01-12') ), cls.create_expected_df_for_factor_compute( [(0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-07')), (30, 230, cls.window_test_start_date), (40, np.NaN, pd.Timestamp('2015-01-10')), (50, 250.*1/16, pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-01-13') ), cls.create_expected_df_for_factor_compute( [(0, 100, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 120, cls.window_test_start_date), (20, 121, pd.Timestamp('2015-01-07')), (30, 230, cls.window_test_start_date), (40, np.NaN, pd.Timestamp('2015-01-10')), (50, 250., pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-01-14') ), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 100*5, cls.window_test_start_date), (10, 110, pd.Timestamp('2015-01-09')), (10, 111, pd.Timestamp('2015-01-12')), (20, 120*.7, cls.window_test_start_date), (20, 121*.7, pd.Timestamp('2015-01-07')), (30, 230*11, cls.window_test_start_date), (40, 240, 
pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], end_date ) for end_date in pd.date_range('2015-01-15', '2015-01-16') ]), cls.create_expected_df_for_factor_compute( [(0, 100*5*6, cls.window_test_start_date), (0, 101, pd.Timestamp('2015-01-20')), (10, 110*.3, pd.Timestamp('2015-01-09')), (10, 111*.3, pd.Timestamp('2015-01-12')), (20, 120*.7*.8, cls.window_test_start_date), (20, 121*.7*.8, pd.Timestamp('2015-01-07')), (30, 230*11*12, cls.window_test_start_date), (30, 231, pd.Timestamp('2015-01-20')), (40, 240*13, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-01-20') ), cls.create_expected_df_for_factor_compute( [(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')), (10, 110 * .3, pd.Timestamp('2015-01-09')), (10, 111 * .3, pd.Timestamp('2015-01-12')), (20, 220 * .7 * .8, cls.window_test_start_date), (20, 221 * .8, pd.Timestamp('2015-01-17')), (40, 240 * 13, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-01-21') ), cls.create_expected_df_for_factor_compute( [(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')), (10, 110 * .3, pd.Timestamp('2015-01-09')), (10, 111 * .3, pd.Timestamp('2015-01-12')), (20, 220 * .7 * .8, cls.window_test_start_date), (20, 221 * .8, pd.Timestamp('2015-01-17')), (40, 240 * 13 * 14, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-01-22') ), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 200*5*6, pd.Timestamp('2015-01-12')), (10, 310*.3, pd.Timestamp('2015-01-09')), (10, 311*.3, pd.Timestamp('2015-01-15')), (20, 220*.7*.8, cls.window_test_start_date), (20, 221*.8, pd.Timestamp('2015-01-17')), (40, 240 * 13 * 14, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], end_date ) for end_date in pd.date_range('2015-01-23', '2015-01-29') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 200*5*6*7, pd.Timestamp('2015-01-12')), (10, 310*.3, pd.Timestamp('2015-01-09')), (10, 311*.3, pd.Timestamp('2015-01-15')), (20, 220*.7*.8*.9, cls.window_test_start_date), (20, 221*.8*.9, pd.Timestamp('2015-01-17')), (40, 240 * 13 * 14, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], end_date ) for end_date in pd.date_range('2015-01-30', '2015-02-05') ]), pd.concat([ cls.create_expected_df_for_factor_compute( [(0, 200*5*6*7, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220*.7*.8*.9, cls.window_test_start_date), (20, 221*.8*.9, pd.Timestamp('2015-01-17')), (40, 240 * 13 * 14, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], end_date ) for end_date in pd.date_range('2015-02-06', '2015-02-09') ]), cls.create_expected_df_for_factor_compute( [(0, 200*5*6*7, pd.Timestamp('2015-01-12')), (0, 201, pd.Timestamp('2015-02-10')), (10, np.NaN, cls.window_test_start_date), (20, 220*.7*.8*.9, cls.window_test_start_date), (20, 221*.8*.9, pd.Timestamp('2015-01-17')), (40, 240 * 13 * 14, pd.Timestamp('2015-01-15')), (50, 250., pd.Timestamp('2015-01-12'))], pd.Timestamp('2015-02-10') ) ]) twoq_next = pd.concat( [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, 220*5/3, cls.window_test_start_date), (30, 230*1/10, cls.window_test_start_date), (40, np.NaN, cls.window_test_start_date), (50, np.NaN, cls.window_test_start_date)], pd.Timestamp('2015-01-09') )] + [cls.create_expected_df_for_factor_compute( [(0, 200*1/4, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), 
(20, 220*5/3, cls.window_test_start_date), (30, np.NaN, cls.window_test_start_date), (40, np.NaN, cls.window_test_start_date)], pd.Timestamp('2015-01-12') )] + [cls.create_expected_df_for_factor_compute( [(0, 200, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220, cls.window_test_start_date), (30, np.NaN, cls.window_test_start_date), (40, np.NaN, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-13', '2015-01-14')] + [cls.create_expected_df_for_factor_compute( [(0, 200*5, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220*.7, cls.window_test_start_date), (30, np.NaN, cls.window_test_start_date), (40, np.NaN, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-15', '2015-01-16')] + [cls.create_expected_df_for_factor_compute( [(0, 200*5*6, pd.Timestamp('2015-01-12')), (10, np.NaN, cls.window_test_start_date), (20, 220*.7*.8, cls.window_test_start_date), (20, 221*.8, pd.Timestamp('2015-01-17')), (30, np.NaN, cls.window_test_start_date), (40, np.NaN, cls.window_test_start_date)], pd.Timestamp('2015-01-20') )] + [cls.create_expected_df_for_factor_compute( [(0, np.NaN, cls.window_test_start_date), (10, np.NaN, cls.window_test_start_date), (20, np.NaN, cls.window_test_start_date), (30, np.NaN, cls.window_test_start_date), (40, np.NaN, cls.window_test_start_date)], end_date ) for end_date in pd.date_range('2015-01-21', '2015-02-10')] ) return { 1: oneq_next, 2: twoq_next } @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazeNextWithSplitAdjustedWindows(NextWithSplitAdjustedWindows): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazeNextSplitAdjustedEstimatesLoader( bz.data(events), columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate'], split_adjusted_asof=cls.split_adjusted_asof_date, ) class WithSplitAdjustedMultipleEstimateColumns(WithEstimates): """ ZiplineTestCase mixin for having multiple estimate columns that are split-adjusted to make sure that adjustments are applied correctly. Attributes ---------- test_start_date : pd.Timestamp The start date of the test. test_end_date : pd.Timestamp The start date of the test. split_adjusted_asof : pd.Timestamp The split-adjusted-asof-date of the data used in the test, to be used to create all loaders of test classes that subclass this mixin. Methods ------- make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str -> np.array]] The expected array of results for each date of the date range for each column. Only for 1 quarter out. make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str -> np.array]] The expected array of results for each date of the date range. For 2 quarters out, so only for the column that is requested to be loaded with 2 quarters out. Tests ----- test_adjustments_with_multiple_adjusted_columns Tests that if you have multiple columns, we still split-adjust correctly. test_multiple_datasets_different_num_announcements Tests that if you have multiple datasets that ask for a different number of quarters out, and each asks for a different estimates column, we still split-adjust correctly. 
""" END_DATE = pd.Timestamp('2015-02-10') test_start_date = pd.Timestamp('2015-01-06', tz='utc') test_end_date = pd.Timestamp('2015-01-12', tz='utc') split_adjusted_asof = pd.Timestamp('2015-01-08') @classmethod def make_columns(cls): return { MultipleColumnsEstimates.event_date: 'event_date', MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter', MultipleColumnsEstimates.fiscal_year: 'fiscal_year', MultipleColumnsEstimates.estimate1: 'estimate1', MultipleColumnsEstimates.estimate2: 'estimate2' } @classmethod def make_events(cls): sid_0_events = pd.DataFrame({ # We only want a stale KD here so that adjustments # will be applied. TS_FIELD_NAME: [pd.Timestamp('2015-01-05'), pd.Timestamp('2015-01-05')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'), pd.Timestamp('2015-01-12')], 'estimate1': [1100., 1200.], 'estimate2': [2100., 2200.], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 0, }) # This is just an extra sid to make sure that we apply adjustments # correctly for multiple columns when we have multiple sids. sid_1_events = pd.DataFrame({ # We only want a stale KD here so that adjustments # will be applied. TS_FIELD_NAME: [pd.Timestamp('2015-01-05'), pd.Timestamp('2015-01-05')], EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-08'), pd.Timestamp('2015-01-11')], 'estimate1': [1110., 1210.], 'estimate2': [2110., 2210.], FISCAL_QUARTER_FIELD_NAME: [1, 2], FISCAL_YEAR_FIELD_NAME: 2015, SID_FIELD_NAME: 1, }) return pd.concat([sid_0_events, sid_1_events]) @classmethod def make_splits_data(cls): sid_0_splits = pd.DataFrame({ SID_FIELD_NAME: 0, 'ratio': (.3, 3.), 'effective_date': (pd.Timestamp('2015-01-07'), pd.Timestamp('2015-01-09')), }) sid_1_splits = pd.DataFrame({ SID_FIELD_NAME: 1, 'ratio': (.4, 4.), 'effective_date': (pd.Timestamp('2015-01-07'), pd.Timestamp('2015-01-09')), }) return pd.concat([sid_0_splits, sid_1_splits]) @classmethod def make_expected_timelines_1q_out(cls): return {} @classmethod def make_expected_timelines_2q_out(cls): return {} @classmethod def init_class_fixtures(cls): super( WithSplitAdjustedMultipleEstimateColumns, cls ).init_class_fixtures() cls.timelines_1q_out = cls.make_expected_timelines_1q_out() cls.timelines_2q_out = cls.make_expected_timelines_2q_out() def test_adjustments_with_multiple_adjusted_columns(self): dataset = MultipleColumnsQuartersEstimates(1) timelines = self.timelines_1q_out window_len = 3 class SomeFactor(CustomFactor): inputs = [dataset.estimate1, dataset.estimate2] window_length = window_len def compute(self, today, assets, out, estimate1, estimate2): assert_almost_equal(estimate1, timelines[today]['estimate1']) assert_almost_equal(estimate2, timelines[today]['estimate2']) engine = self.make_engine() engine.run_pipeline( Pipeline({'est': SomeFactor()}), start_date=self.test_start_date, # last event date we have end_date=self.test_end_date, ) def test_multiple_datasets_different_num_announcements(self): dataset1 = MultipleColumnsQuartersEstimates(1) dataset2 = MultipleColumnsQuartersEstimates(2) timelines_1q_out = self.timelines_1q_out timelines_2q_out = self.timelines_2q_out window_len = 3 class SomeFactor1(CustomFactor): inputs = [dataset1.estimate1] window_length = window_len def compute(self, today, assets, out, estimate1): assert_almost_equal( estimate1, timelines_1q_out[today]['estimate1'] ) class SomeFactor2(CustomFactor): inputs = [dataset2.estimate2] window_length = window_len def compute(self, today, assets, out, estimate2): assert_almost_equal( estimate2, timelines_2q_out[today]['estimate2'] 
) engine = self.make_engine() engine.run_pipeline( Pipeline({'est1': SomeFactor1(), 'est2': SomeFactor2()}), start_date=self.test_start_date, # last event date we have end_date=self.test_end_date, ) class PreviousWithSplitAdjustedMultipleEstimateColumns( WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase ): @classmethod def make_loader(cls, events, columns): return PreviousSplitAdjustedEarningsEstimatesLoader( events, columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate1', 'estimate2'], split_adjusted_asof=cls.split_adjusted_asof, ) @classmethod def make_expected_timelines_1q_out(cls): return { pd.Timestamp('2015-01-06', tz='utc'): { 'estimate1': np.array([[np.NaN, np.NaN]] * 3), 'estimate2': np.array([[np.NaN, np.NaN]] * 3) }, pd.Timestamp('2015-01-07', tz='utc'): { 'estimate1': np.array([[np.NaN, np.NaN]] * 3), 'estimate2': np.array([[np.NaN, np.NaN]] * 3) }, pd.Timestamp('2015-01-08', tz='utc'): { 'estimate1': np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 1110.]]), 'estimate2': np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 2110.]]) }, pd.Timestamp('2015-01-09', tz='utc'): { 'estimate1': np.array([[np.NaN, np.NaN]] + [[np.NaN, 1110. * 4]] + [[1100 * 3., 1110. * 4]]), 'estimate2': np.array([[np.NaN, np.NaN]] + [[np.NaN, 2110. * 4]] + [[2100 * 3., 2110. * 4]]) }, pd.Timestamp('2015-01-12', tz='utc'): { 'estimate1': np.array([[np.NaN, np.NaN]] * 2 + [[1200 * 3., 1210. * 4]]), 'estimate2': np.array([[np.NaN, np.NaN]] * 2 + [[2200 * 3., 2210. * 4]]) } } @classmethod def make_expected_timelines_2q_out(cls): return { pd.Timestamp('2015-01-06', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] * 3) }, pd.Timestamp('2015-01-07', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] * 3) }, pd.Timestamp('2015-01-08', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] * 3) }, pd.Timestamp('2015-01-09', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] * 3) }, pd.Timestamp('2015-01-12', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] * 2 + [[2100 * 3., 2110. * 4]]) } } @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazePreviousWithMultipleEstimateColumns( PreviousWithSplitAdjustedMultipleEstimateColumns ): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazePreviousSplitAdjustedEstimatesLoader( bz.data(events), columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate1', 'estimate2'], split_adjusted_asof=cls.split_adjusted_asof, ) class NextWithSplitAdjustedMultipleEstimateColumns( WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase ): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): return NextSplitAdjustedEarningsEstimatesLoader( events, columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate1', 'estimate2'], split_adjusted_asof=cls.split_adjusted_asof, ) @classmethod def make_expected_timelines_1q_out(cls): return { pd.Timestamp('2015-01-06', tz='utc'): { 'estimate1': np.array([[np.NaN, np.NaN]] + [[1100. * 1/.3, 1110. * 1/.4]] * 2), 'estimate2': np.array([[np.NaN, np.NaN]] + [[2100. * 1/.3, 2110. 
* 1/.4]] * 2), }, pd.Timestamp('2015-01-07', tz='utc'): { 'estimate1': np.array([[1100., 1110.]] * 3), 'estimate2': np.array([[2100., 2110.]] * 3) }, pd.Timestamp('2015-01-08', tz='utc'): { 'estimate1': np.array([[1100., 1110.]] * 3), 'estimate2': np.array([[2100., 2110.]] * 3) }, pd.Timestamp('2015-01-09', tz='utc'): { 'estimate1': np.array([[1100 * 3., 1210. * 4]] * 3), 'estimate2': np.array([[2100 * 3., 2210. * 4]] * 3) }, pd.Timestamp('2015-01-12', tz='utc'): { 'estimate1': np.array([[1200 * 3., np.NaN]] * 3), 'estimate2': np.array([[2200 * 3., np.NaN]] * 3) } } @classmethod def make_expected_timelines_2q_out(cls): return { pd.Timestamp('2015-01-06', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] + [[2200 * 1/.3, 2210. * 1/.4]] * 2) }, pd.Timestamp('2015-01-07', tz='utc'): { 'estimate2': np.array([[2200., 2210.]] * 3) }, pd.Timestamp('2015-01-08', tz='utc'): { 'estimate2': np.array([[2200, 2210.]] * 3) }, pd.Timestamp('2015-01-09', tz='utc'): { 'estimate2': np.array([[2200 * 3., np.NaN]] * 3) }, pd.Timestamp('2015-01-12', tz='utc'): { 'estimate2': np.array([[np.NaN, np.NaN]] * 3) } } @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") class BlazeNextWithMultipleEstimateColumns( NextWithSplitAdjustedMultipleEstimateColumns ): @classmethod @unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows") def make_loader(cls, events, columns): import blaze as bz return BlazeNextSplitAdjustedEstimatesLoader( bz.data(events), columns, split_adjustments_loader=cls.adjustment_reader, split_adjusted_column_names=['estimate1', 'estimate2'], split_adjusted_asof=cls.split_adjusted_asof, ) class WithAdjustmentBoundaries(WithEstimates): """ ZiplineTestCase mixin providing class-level attributes, methods, and a test to make sure that when the split-adjusted-asof-date is not strictly within the date index, we can still apply adjustments correctly. Attributes ---------- split_adjusted_before_start : pd.Timestamp A split-adjusted-asof-date before the start date of the test. split_adjusted_after_end : pd.Timestamp A split-adjusted-asof-date before the end date of the test. split_adjusted_asof_dates : list of tuples of pd.Timestamp All the split-adjusted-asof-dates over which we want to parameterize the test. Methods ------- make_expected_out -> dict[pd.Timestamp -> pd.DataFrame] A dictionary of the expected output of the pipeline at each of the dates of interest. """ START_DATE = pd.Timestamp('2015-01-04') # We want to run the pipeline starting from `START_DATE`, but the # pipeline results will start from the next day, which is # `test_start_date`. test_start_date =
pd.Timestamp('2015-01-05')
pandas.Timestamp
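# --- Illustrative aside (not part of the test suite above) ---
# The expected timelines in the split-adjusted window tests encode arithmetic
# such as 131 * 11 * 12 or 121 * .7 * .8: an estimate is scaled by the product
# of split ratios taking effect after the split-adjusted-asof date and on or
# before the simulation date, while values like 131 * 1/10 appear to divide
# back out splits that are already baked into the asof-adjusted data but have
# not yet occurred on the simulation date.  The sketch below restates only the
# forward-application piece of that arithmetic in plain pandas; the dates,
# ratios, and helper name are hypothetical and this is not a description of
# the loaders' implementation.
import pandas as pd

splits = pd.DataFrame({
    'effective_date': pd.to_datetime(['2015-01-13', '2015-01-15', '2015-01-18']),
    'ratio': [10., 11., 12.],
})

def split_adjust(raw, splits, split_adjusted_asof, today):
    # Product of ratios effective after the asof date, up to and including today.
    applicable = splits[
        (splits['effective_date'] > split_adjusted_asof)
        & (splits['effective_date'] <= today)
    ]
    return raw * applicable['ratio'].prod()

asof = pd.Timestamp('2015-01-14')
print(split_adjust(131., splits, asof, pd.Timestamp('2015-01-16')))  # 131 * 11
print(split_adjust(131., splits, asof, pd.Timestamp('2015-01-20')))  # 131 * 11 * 12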
import itertools import dask.dataframe as dd import dask.dataframe.groupby as ddgb import numpy as np import pandas import toolz from pandas import isnull import ibis import ibis.expr.operations as ops from ibis.backends.pandas.core import integer_types, scalar_types from ibis.backends.pandas.execution.strings import ( execute_series_join_scalar_sep, execute_series_regex_extract, execute_series_regex_replace, execute_series_regex_search, execute_series_right, execute_series_translate_scalar_scalar, execute_series_translate_scalar_series, execute_series_translate_series_scalar, execute_series_translate_series_series, execute_string_capitalize, execute_string_contains, execute_string_length_series, execute_string_like_series_string, execute_string_lower, execute_string_lpad, execute_string_lstrip, execute_string_repeat, execute_string_reverse, execute_string_rpad, execute_string_rstrip, execute_string_strip, execute_string_upper, execute_substring_int_int, haystack_to_series_of_lists, ) from ..dispatch import execute_node from .util import ( TypeRegistrationDict, make_selected_obj, register_types_to_dispatcher, ) DASK_DISPATCH_TYPES: TypeRegistrationDict = { ops.StringLength: [((dd.Series,), execute_string_length_series)], ops.Substring: [ ( ( dd.Series, integer_types, integer_types, ), execute_substring_int_int, ), ], ops.Strip: [((dd.Series,), execute_string_strip)], ops.LStrip: [((dd.Series,), execute_string_lstrip)], ops.RStrip: [((dd.Series,), execute_string_rstrip)], ops.LPad: [ ( ( dd.Series, (dd.Series,) + integer_types, (dd.Series, str), ), execute_string_lpad, ), ], ops.RPad: [ ( ( dd.Series, (dd.Series,) + integer_types, (dd.Series, str), ), execute_string_rpad, ), ], ops.Reverse: [((dd.Series,), execute_string_reverse)], ops.Lowercase: [((dd.Series,), execute_string_lower)], ops.Uppercase: [((dd.Series,), execute_string_upper)], ops.Capitalize: [((dd.Series,), execute_string_capitalize)], ops.Repeat: [ ((dd.Series, (dd.Series,) + integer_types), execute_string_repeat), ], ops.StringFind: [ ( ( dd.Series, (dd.Series, str), (dd.Series, type(None)) + integer_types, (dd.Series, type(None)) + integer_types, ), execute_string_contains, ) ], ops.StringSQLLike: [ ( ( dd.Series, str, (str, type(None)), ), execute_string_like_series_string, ), ], ops.RegexSearch: [ ( ( dd.Series, str, ), execute_series_regex_search, ) ], ops.RegexExtract: [ ( (dd.Series, (dd.Series, str), integer_types), execute_series_regex_extract, ), ], ops.RegexReplace: [ ( ( dd.Series, str, str, ), execute_series_regex_replace, ), ], ops.Translate: [ ( (dd.Series, dd.Series, dd.Series), execute_series_translate_series_series, ), ((dd.Series, dd.Series, str), execute_series_translate_series_scalar), ((dd.Series, str, dd.Series), execute_series_translate_scalar_series), ((dd.Series, str, str), execute_series_translate_scalar_scalar), ], ops.StrRight: [((dd.Series, integer_types), execute_series_right)], ops.StringJoin: [ (((dd.Series, str), list), execute_series_join_scalar_sep), ], } register_types_to_dispatcher(execute_node, DASK_DISPATCH_TYPES) @execute_node.register(ops.Substring, dd.Series, dd.Series, integer_types) def execute_substring_series_int(op, data, start, length, **kwargs): return execute_substring_series_series( op, data, start, dd.from_array(np.repeat(length, len(start))), **kwargs ) @execute_node.register(ops.Substring, dd.Series, integer_types, dd.Series) def execute_string_substring_int_series(op, data, start, length, **kwargs): return execute_substring_series_series( op, data, 
dd.from_array(np.repeat(start, len(length))), length, **kwargs, ) # TODO - substring - #2553 @execute_node.register(ops.Substring, dd.Series, dd.Series, dd.Series) def execute_substring_series_series(op, data, start, length, **kwargs): end = start + length # TODO - this is broken def iterate( value, start_iter=start.iteritems(), end_iter=end.iteritems(), ): _, begin = next(start_iter) _, end = next(end_iter) if (begin is not None and
isnull(begin)
pandas.isnull
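# --- Illustrative aside (not part of the dispatch module above) ---
# execute_substring_series_series is flagged as broken (TODO #2553).  For
# reference, the per-row semantics it is meant to provide -- slicing each
# string by its own (start, length) pair -- can be written directly in plain
# pandas.  The data here is hypothetical; this is a sketch of the intended
# row-wise behaviour, not of the dask implementation.
import pandas as pd

data = pd.Series(['abcdef', 'ghijkl', 'mnopqr'])
start = pd.Series([0, 1, 2])
length = pd.Series([3, 3, 3])

end = start + length
result = pd.Series(
    [s[b:e] for s, b, e in zip(data, start, end)],
    index=data.index,
)
print(result.tolist())  # ['abc', 'hij', 'opq']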
import numpy as np from datetime import timedelta from distutils.version import LooseVersion import pandas as pd import pandas.util.testing as tm from pandas import to_timedelta from pandas.util.testing import assert_series_equal, assert_frame_equal from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex, timedelta_range, date_range, DatetimeIndex, Int64Index, _np_version_under1p10, Float64Index, Index, tslib) from pandas.tests.test_base import Ops class TestTimedeltaIndexOps(Ops): def setUp(self): super(TestTimedeltaIndexOps, self).setUp() mask = lambda x: isinstance(x, TimedeltaIndex) self.is_valid_objs = [o for o in self.objs if mask(o)] self.not_valid_objs = [] def test_ops_properties(self): self.check_ops_properties(['days', 'hours', 'minutes', 'seconds', 'milliseconds']) self.check_ops_properties(['microseconds', 'nanoseconds']) def test_asobject_tolist(self): idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx') expected_list = [Timedelta('1 days'), Timedelta('2 days'), Timedelta('3 days'), Timedelta('4 days')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT, timedelta(days=4)], name='idx') expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT, Timedelta('4 days')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assert_index_equal(result, expected) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) def test_minmax(self): # monotonic idx1 = TimedeltaIndex(['1 days', '2 days', '3 days']) self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT']) self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), Timedelta('1 days')), self.assertEqual(idx.max(), Timedelta('3 days')), self.assertEqual(idx.argmin(), 0) self.assertEqual(idx.argmax(), 2) for op in ['min', 'max']: # Return NaT obj = TimedeltaIndex([]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = TimedeltaIndex([pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) def test_numpy_minmax(self): dr = pd.date_range(start='2016-01-15', end='2016-01-20') td = TimedeltaIndex(np.asarray(dr)) self.assertEqual(np.min(td), Timedelta('16815 days')) self.assertEqual(np.max(td), Timedelta('16820 days')) errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0) self.assertEqual(np.argmin(td), 0) self.assertEqual(np.argmax(td), 5) if not _np_version_under1p10: errmsg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0) tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0) def test_round(self): td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min') elt = td[1] expected_rng = TimedeltaIndex([ Timedelta('16801 days 00:00:00'), Timedelta('16801 days 00:00:00'), Timedelta('16801 days 01:00:00'), Timedelta('16801 days 02:00:00'), Timedelta('16801 days 02:00:00'), ]) 
expected_elt = expected_rng[1] tm.assert_index_equal(td.round(freq='H'), expected_rng) self.assertEqual(elt.round(freq='H'), expected_elt) msg = pd.tseries.frequencies._INVALID_FREQ_ERROR with self.assertRaisesRegexp(ValueError, msg): td.round(freq='foo') with tm.assertRaisesRegexp(ValueError, msg): elt.round(freq='foo') msg = "<MonthEnd> is a non-fixed frequency" tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M') tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M') def test_representation(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', " "freq='D')") exp3 = ("TimedeltaIndex(['1 days', '2 days'], " "dtype='timedelta64[ns]', freq='D')") exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], " "dtype='timedelta64[ns]', freq='D')") exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', " "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)") with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(idx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """Series([], dtype: timedelta64[ns])""" exp2 = """0 1 days dtype: timedelta64[ns]""" exp3 = """0 1 days 1 2 days dtype: timedelta64[ns]""" exp4 = """0 1 days 1 2 days 2 3 days dtype: timedelta64[ns]""" exp5 = """0 1 days 00:00:01 1 2 days 00:00:00 2 3 days 00:00:00 dtype: timedelta64[ns]""" with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): result = repr(pd.Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """TimedeltaIndex: 0 entries Freq: D""" exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days Freq: D""" exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days Freq: D""" exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days Freq: D""" exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days " "00:00:00") for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): result = idx.summary() self.assertEqual(result, expected) def test_add_iadd(self): # only test adding/sub offsets as + is now numeric # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = timedelta_range('1 days', '10 days') result = rng + delta expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00', freq='D') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) # int rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = 
rng + 1 expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) def test_sub_isub(self): # only test adding/sub offsets as - is now numeric # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = timedelta_range('1 days', '10 days') result = rng - delta expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) # int rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng - 1 expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) idx = TimedeltaIndex(['1 day', '2 day']) msg = "cannot subtract a datelike from a TimedeltaIndex" with tm.assertRaisesRegexp(TypeError, msg): idx - Timestamp('2011-01-01') result = Timestamp('2011-01-01') + idx expected = DatetimeIndex(['2011-01-02', '2011-01-03']) tm.assert_index_equal(result, expected) def test_ops_compat(self): offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] rng = timedelta_range('1 days', '10 days', name='foo') # multiply for offset in offsets: self.assertRaises(TypeError, lambda: rng * offset) # divide expected = Int64Index((np.arange(10) + 1) * 12, name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result, expected, exact=False) # divide with nats rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') expected = Float64Index([12, np.nan, 24], name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result, expected) # don't allow division by NaT (make could in the future) self.assertRaises(TypeError, lambda: rng / pd.NaT) def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') self.assertRaises(TypeError, lambda: tdi - dt) self.assertRaises(TypeError, lambda: tdi - dti) self.assertRaises(TypeError, lambda: td - dt) self.assertRaises(TypeError, lambda: td - dti) result = dt - dti expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar') tm.assert_index_equal(result, expected) result = dti - dt expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar') tm.assert_index_equal(result, expected) result = tdi - td expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo') tm.assert_index_equal(result, expected, check_names=False) result = td - tdi expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo') tm.assert_index_equal(result, expected, check_names=False) result = dti - td expected = DatetimeIndex( ['20121231', '20130101', '20130102'], name='bar') tm.assert_index_equal(result, expected, check_names=False) result = dt - tdi expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo') tm.assert_index_equal(result, expected) def test_subtraction_ops_with_tz(self): # check that dt/dti subtraction ops with tz are validated dti = date_range('20130101', periods=3) ts = Timestamp('20130101') dt = ts.to_pydatetime() dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') ts_tz = Timestamp('20130101').tz_localize('US/Eastern') ts_tz2 = Timestamp('20130101').tz_localize('CET') dt_tz = ts_tz.to_pydatetime() td = 
Timedelta('1 days') def _check(result, expected): self.assertEqual(result, expected) self.assertIsInstance(result, Timedelta) # scalars result = ts - ts expected = Timedelta('0 days') _check(result, expected) result = dt_tz - ts_tz expected = Timedelta('0 days') _check(result, expected) result = ts_tz - dt_tz expected = Timedelta('0 days') _check(result, expected) # tz mismatches self.assertRaises(TypeError, lambda: dt_tz - ts) self.assertRaises(TypeError, lambda: dt_tz - dt) self.assertRaises(TypeError, lambda: dt_tz - ts_tz2) self.assertRaises(TypeError, lambda: dt - dt_tz) self.assertRaises(TypeError, lambda: ts - dt_tz) self.assertRaises(TypeError, lambda: ts_tz2 - ts) self.assertRaises(TypeError, lambda: ts_tz2 - dt) self.assertRaises(TypeError, lambda: ts_tz - ts_tz2) # with dti self.assertRaises(TypeError, lambda: dti - ts_tz) self.assertRaises(TypeError, lambda: dti_tz - ts) self.assertRaises(TypeError, lambda: dti_tz - ts_tz2) result = dti_tz - dt_tz expected = TimedeltaIndex(['0 days', '1 days', '2 days']) tm.assert_index_equal(result, expected) result = dt_tz - dti_tz expected = TimedeltaIndex(['0 days', '-1 days', '-2 days']) tm.assert_index_equal(result, expected) result = dti_tz - ts_tz expected = TimedeltaIndex(['0 days', '1 days', '2 days']) tm.assert_index_equal(result, expected) result = ts_tz - dti_tz expected = TimedeltaIndex(['0 days', '-1 days', '-2 days']) tm.assert_index_equal(result, expected) result = td - td expected = Timedelta('0 days') _check(result, expected) result = dti_tz - td expected = DatetimeIndex( ['20121231', '20130101', '20130102'], tz='US/Eastern') tm.assert_index_equal(result, expected) def test_dti_tdi_numeric_ops(self): # These are normally union/diff set-like ops tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') # TODO(wesm): unused? 
# td = Timedelta('1 days') # dt = Timestamp('20130101') result = tdi - tdi expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo') tm.assert_index_equal(result, expected) result = tdi + tdi expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo') tm.assert_index_equal(result, expected) result = dti - tdi # name will be reset expected = DatetimeIndex(['20121231', pd.NaT, '20130101']) tm.assert_index_equal(result, expected) def test_sub_period(self): # GH 13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') for freq in [None, 'H']: idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq) with tm.assertRaises(TypeError): idx - p with tm.assertRaises(TypeError): p - idx def test_addition_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') result = tdi + dt expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') tm.assert_index_equal(result, expected) result = dt + tdi expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') tm.assert_index_equal(result, expected) result = td + tdi expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') tm.assert_index_equal(result, expected) result = tdi + td expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') tm.assert_index_equal(result, expected) # unequal length self.assertRaises(ValueError, lambda: tdi + dti[0:1]) self.assertRaises(ValueError, lambda: tdi[0:1] + dti) # random indexes self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3])) # this is a union! # self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi) result = tdi + dti # name will be reset expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) tm.assert_index_equal(result, expected) result = dti + tdi # name will be reset expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) tm.assert_index_equal(result, expected) result = dt + td expected = Timestamp('20130102') self.assertEqual(result, expected) result = td + dt expected = Timestamp('20130102') self.assertEqual(result, expected) def test_comp_nat(self): left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]) right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')]) for l, r in [(left, right), (left.asobject, right.asobject)]: result = l == r expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) result = l != r expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l == pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT == r, expected) expected = np.array([True, True, True]) tm.assert_numpy_array_equal(l != pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT != l, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l < pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT > l, expected) def test_value_counts_unique(self): # GH 7735 idx = timedelta_range('1 days 09:00:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1))) exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10) expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) 
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10) tm.assert_index_equal(idx.unique(), expected) idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00', '1 days 08:00:00', '1 days 08:00:00', pd.NaT]) exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00']) expected = Series([3, 2], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT]) expected = Series([3, 2, 1], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1], ['00:01:00', '00:01:00', '00:02:00'], ['00:01:00', '00:01:00', '00:00:01'])): tm.assertIn(idx[0], idx) def test_unknown_attribute(self): # GH 9680 tdi = pd.timedelta_range(start=0, periods=10, freq='1s') ts = pd.Series(np.random.normal(size=10), index=tdi) self.assertNotIn('foo', ts.__dict__.keys()) self.assertRaises(AttributeError, lambda: ts.foo) def test_order(self): # GH 10295 idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx') idx2 = TimedeltaIndex( ['1 hour', '2 hour', '3 hour'], freq='H', name='idx') for idx in [idx1, idx2]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False) self.assertEqual(ordered.freq, idx.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, idx[::-1]) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour', '2 hour ', '1 hour'], name='idx1') exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour', '3 hour', '5 hour'], name='idx1') idx2 = TimedeltaIndex(['1 day', '3 day', '5 day', '2 day', '1 day'], name='idx2') # TODO(wesm): unused? 
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day', # '3 day', '5 day'], name='idx2') # idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute', # '2 minute', pd.NaT], name='idx3') # exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute', # '5 minute'], name='idx3') for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertIsNone(ordered.freq) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) exp = np.array([2, 1, 3, 4, 0]) self.assert_numpy_array_equal(indexer, exp, check_dtype=False) self.assertIsNone(ordered.freq) def test_getitem(self): idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx[0] self.assertEqual(result, pd.Timedelta('1 day')) result = idx[0:5] expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[0:10:2] expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[-20:-5:3] expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[4::-1] expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'], freq='-1D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup = idx.append(idx) self.assertIsNone(idx_dup.freq) # freq is reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertIsNone(result.freq) def test_drop_duplicates(self): # to check Index/Series compat base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') idx = base.append(base[:5]) res = idx.drop_duplicates() tm.assert_index_equal(res, base) res = Series(idx).drop_duplicates() tm.assert_series_equal(res, Series(base)) res = idx.drop_duplicates(keep='last') exp = base[5:].append(base[:5]) tm.assert_index_equal(res, exp) res = Series(idx).drop_duplicates(keep='last') tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36))) res = idx.drop_duplicates(keep=False) tm.assert_index_equal(res, base[5:]) res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) def test_take(self): # GH 10295 idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx.take([0]) self.assertEqual(result, pd.Timedelta('1 day')) result = idx.take([-1]) self.assertEqual(result, pd.Timedelta('31 day')) result = idx.take([0, 1, 2]) expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = 
pd.timedelta_range('1 day', '5 day', freq='2D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([7, 4, 1]) expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([3, 2, 5]) expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) result = idx.take([-3, 2, 5]) expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) def test_take_invalid_kwargs(self): idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') indices = [1, 6, 5, 9, 10, 13, 15, 3] msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assertRaisesRegexp(TypeError, msg, idx.take, indices, foo=2) msg = "the 'out' parameter is not supported" tm.assertRaisesRegexp(ValueError, msg, idx.take, indices, out=indices) msg = "the 'mode' parameter is not supported" tm.assertRaisesRegexp(ValueError, msg, idx.take, indices, mode='clip') def test_infer_freq(self): # GH 11018 for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S' ]: idx = pd.timedelta_range('1', freq=freq, periods=10) result = pd.TimedeltaIndex(idx.asi8, freq='infer') tm.assert_index_equal(idx, result) self.assertEqual(result.freq, freq) def test_nat_new(self): idx = pd.timedelta_range('1', freq='D', periods=5, name='x') result = idx._nat_new() exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x') tm.assert_index_equal(result, exp) result = idx._nat_new(box=False) exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) def test_shift(self): # GH 9903 idx = pd.TimedeltaIndex([], name='xxx') tm.assert_index_equal(idx.shift(0, freq='H'), idx) tm.assert_index_equal(idx.shift(3, freq='H'), idx) idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx') tm.assert_index_equal(idx.shift(0, freq='H'), idx) exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx') tm.assert_index_equal(idx.shift(3, freq='H'), exp) exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx') tm.assert_index_equal(idx.shift(-3, freq='H'), exp) tm.assert_index_equal(idx.shift(0, freq='T'), idx) exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'], name='xxx') tm.assert_index_equal(idx.shift(3, freq='T'), exp) exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'], name='xxx') tm.assert_index_equal(idx.shift(-3, freq='T'), exp) def test_repeat(self): index = pd.timedelta_range('1 days', periods=2, freq='D') exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days']) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) index = TimedeltaIndex(['1 days', 'NaT', '3 days']) exp = TimedeltaIndex(['1 days', '1 days', '1 days', 'NaT', 'NaT', 'NaT', '3 days', '3 days', '3 days']) for res in [index.repeat(3), np.repeat(index, 3)]: tm.assert_index_equal(res, exp) self.assertIsNone(res.freq) def test_nat(self): self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT) self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT) idx = pd.TimedeltaIndex(['1 days', '2 days']) self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) self.assertFalse(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) idx = pd.TimedeltaIndex(['1 days', 
'NaT']) self.assertTrue(idx._can_hold_na) tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) self.assertTrue(idx.hasnans) tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) def test_equals(self): # GH 13107 idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT']) self.assertTrue(idx.equals(idx)) self.assertTrue(idx.equals(idx.copy())) self.assertTrue(idx.equals(idx.asobject)) self.assertTrue(idx.asobject.equals(idx)) self.assertTrue(idx.asobject.equals(idx.asobject)) self.assertFalse(idx.equals(list(idx))) self.assertFalse(idx.equals(pd.Series(idx))) idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT']) self.assertFalse(idx.equals(idx2)) self.assertFalse(idx.equals(idx2.copy())) self.assertFalse(idx.equals(idx2.asobject)) self.assertFalse(idx.asobject.equals(idx2)) self.assertFalse(idx.asobject.equals(idx2.asobject)) self.assertFalse(idx.equals(list(idx2))) self.assertFalse(idx.equals(pd.Series(idx2))) class TestTimedeltas(tm.TestCase): _multiprocess_can_split_ = True def test_ops(self): td = Timedelta(10, unit='d') self.assertEqual(-td, Timedelta(-10, unit='d')) self.assertEqual(+td, Timedelta(10, unit='d')) self.assertEqual(td - td, Timedelta(0, unit='ns')) self.assertTrue((td - pd.NaT) is pd.NaT) self.assertEqual(td + td, Timedelta(20, unit='d')) self.assertTrue((td + pd.NaT) is pd.NaT) self.assertEqual(td * 2, Timedelta(20, unit='d')) self.assertTrue((td * pd.NaT) is pd.NaT) self.assertEqual(td / 2, Timedelta(5, unit='d')) self.assertEqual(abs(td), td) self.assertEqual(abs(-td), td) self.assertEqual(td / td, 1) self.assertTrue((td / pd.NaT) is np.nan) # invert self.assertEqual(-td, Timedelta('-10d')) self.assertEqual(td * -1, Timedelta('-10d')) self.assertEqual(-1 * td, Timedelta('-10d')) self.assertEqual(abs(-td), Timedelta('10d')) # invalid self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2) # invalid multiply with another timedelta self.assertRaises(TypeError, lambda: td * td) # can't operate with integers self.assertRaises(TypeError, lambda: td + 2) self.assertRaises(TypeError, lambda: td - 2) def test_ops_offsets(self): td = Timedelta(10, unit='d') self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1)) self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td) self.assertEqual(240, td / pd.offsets.Hour(1)) self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td) self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1)) self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td) def test_ops_ndarray(self): td = Timedelta('1 day') # timedelta, timedelta other = pd.to_timedelta(['1 day']).values expected = pd.to_timedelta(['2 days']).values self.assert_numpy_array_equal(td + other, expected) if LooseVersion(np.__version__) >= '1.8': self.assert_numpy_array_equal(other + td, expected) self.assertRaises(TypeError, lambda: td + np.array([1])) self.assertRaises(TypeError, lambda: np.array([1]) + td) expected = pd.to_timedelta(['0 days']).values self.assert_numpy_array_equal(td - other, expected) if LooseVersion(np.__version__) >= '1.8': self.assert_numpy_array_equal(-other + td, expected) self.assertRaises(TypeError, lambda: td - np.array([1])) self.assertRaises(TypeError, lambda: np.array([1]) - td) expected = pd.to_timedelta(['2 days']).values self.assert_numpy_array_equal(td * np.array([2]), expected) self.assert_numpy_array_equal(np.array([2]) * td, expected) self.assertRaises(TypeError, lambda: td * other) self.assertRaises(TypeError, lambda: other * td) self.assert_numpy_array_equal(td / other, 
np.array([1], dtype=np.float64)) if LooseVersion(np.__version__) >= '1.8': self.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64)) # timedelta, datetime other = pd.to_datetime(['2000-01-01']).values expected = pd.to_datetime(['2000-01-02']).values self.assert_numpy_array_equal(td + other, expected) if LooseVersion(np.__version__) >= '1.8': self.assert_numpy_array_equal(other + td, expected) expected = pd.to_datetime(['1999-12-31']).values self.assert_numpy_array_equal(-td + other, expected) if LooseVersion(np.__version__) >= '1.8': self.assert_numpy_array_equal(other - td, expected) def test_ops_series(self): # regression test for GH8813 td = Timedelta('1 day') other = pd.Series([1, 2]) expected = pd.Series(pd.to_timedelta(['1 day', '2 days'])) tm.assert_series_equal(expected, td * other) tm.assert_series_equal(expected, other * td) def test_ops_series_object(self): # GH 13043 s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'), pd.Timestamp('2015-01-01', tz='Asia/Tokyo')], name='xxx') self.assertEqual(s.dtype, object) exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'), pd.Timestamp('2015-01-02', tz='Asia/Tokyo')], name='xxx') tm.assert_series_equal(s + pd.Timedelta('1 days'), exp) tm.assert_series_equal(pd.Timedelta('1 days') + s, exp) # object series & object series s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'), pd.Timestamp('2015-01-05', tz='Asia/Tokyo')], name='xxx') self.assertEqual(s2.dtype, object) exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')], name='xxx') tm.assert_series_equal(s2 - s, exp) tm.assert_series_equal(s - s2, -exp) s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')], name='xxx', dtype=object) self.assertEqual(s.dtype, object) exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')], name='xxx') tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp) tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp) def test_ops_notimplemented(self): class Other: pass other = Other() td = Timedelta('1 day') self.assertTrue(td.__add__(other) is NotImplemented) self.assertTrue(td.__sub__(other) is NotImplemented) self.assertTrue(td.__truediv__(other) is NotImplemented) self.assertTrue(td.__mul__(other) is NotImplemented) self.assertTrue(td.__floordiv__(td) is NotImplemented) def test_ops_error_str(self): # GH 13624 tdi = TimedeltaIndex(['1 day', '2 days']) for l, r in [(tdi, 'a'), ('a', tdi)]: with tm.assertRaises(TypeError): l + r with tm.assertRaises(TypeError): l > r with tm.assertRaises(TypeError): l == r with tm.assertRaises(TypeError): l != r def test_timedelta_ops(self): # GH4984 # make sure ops return Timedelta s = Series([Timestamp('20130101') + timedelta(seconds=i * i) for i in range(10)]) td = s.diff() result = td.mean() expected = to_timedelta(timedelta(seconds=9)) self.assertEqual(result, expected) result = td.to_frame().mean() self.assertEqual(result[0], expected) result = td.quantile(.1) expected = Timedelta(np.timedelta64(2600, 'ms')) self.assertEqual(result, expected) result = td.median() expected = to_timedelta('00:00:09') self.assertEqual(result, expected) result = td.to_frame().median() self.assertEqual(result[0], expected) # GH 6462 # consistency in returned values for sum result = td.sum() expected = to_timedelta('00:01:21') self.assertEqual(result, expected) result = td.to_frame().sum() self.assertEqual(result[0], expected) # std result = td.std() expected = to_timedelta(Series(td.dropna().values).std()) self.assertEqual(result, expected) result = 
td.to_frame().std() self.assertEqual(result[0], expected) # invalid ops for op in ['skew', 'kurt', 'sem', 'prod']: self.assertRaises(TypeError, getattr(td, op)) # GH 10040 # make sure NaT is properly handled by median() s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')]) self.assertEqual(s.diff().median(), timedelta(days=4)) s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), Timestamp('2015-02-15')]) self.assertEqual(s.diff().median(), timedelta(days=6)) def test_timedelta_ops_scalar(self): # GH 6808 base = pd.to_datetime('20130101 09:01:12.123456') expected_add = pd.to_datetime('20130101 09:01:22.123456') expected_sub = pd.to_datetime('20130101 09:01:02.123456') for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10), np.timedelta64(10, 's'), np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)
pandas.offsets.Second
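A short standalone sketch of pandas.offsets.Second, which the completion above adds to the list of offsets being tested; the timestamps here are illustrative.
import pandas as pd

# pd.offsets.Second(n) is a DateOffset of n seconds. In the test above it is
# interchangeable with pd.to_timedelta(10, unit='s'), timedelta(seconds=10)
# and np.timedelta64(10, 's') when shifting a Timestamp.
base = pd.Timestamp("2013-01-01 09:01:12.123456")
print(base + pd.offsets.Second(10))   # 2013-01-01 09:01:22.123456
print(base - pd.offsets.Second(10))   # 2013-01-01 09:01:02.123456
# The same offset shifts a whole DatetimeIndex element-wise.
idx = pd.date_range("2013-01-01", periods=3, freq="H")
print(idx + pd.offsets.Second(10))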
import pandas as pd import MDAnalysis as mda import numpy as np import matplotlib.pyplot as plt from enmspring.k_b0_util import get_df_by_filter_bp from enmspring.na_seq import sequences from enmspring.spring import Spring atomname_map = {'A': {'type1': 'N6', 'type2': 'N1', 'type3': 'C2'}, 'T': {'type1': 'O4', 'type2': 'N3', 'type3': 'O2'}, 'C': {'type1': 'N4', 'type2': 'N3', 'type3': 'O2'}, 'G': {'type1': 'O6', 'type2': 'N1', 'type3': 'N2'}} class InputException(Exception): pass class HBAgent: cutoff = 4.7 type_na = 'bdna+bdna' d_atcg = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'} def __init__(self, host, rootfolder, n_bp): self.rootfolder = rootfolder self.host = host self.n_bp = n_bp self.seq_guide = sequences[host]['guide'] self.df_hb = self.__read_df_hb() self.basepairs = None def initialize_basepair(self): basepairs = dict() for idx, resname_i in enumerate(self.seq_guide): resid_i = idx + 1 bp_obj = BasePair(resname_i, resid_i, self.df_hb) basepairs[resid_i] = bp_obj self.basepairs = basepairs def __read_df_hb(self): spring_obj = Spring(self.rootfolder, self.host, self.type_na, self.n_bp) df = spring_obj.read_k_b0_pairtype_df_given_cutoff(self.cutoff) df1 = get_df_by_filter_bp(df, 'hb') df2 = self.__read_df_at_type3() if len(df2) == 0: return df1 else: df3 =
pd.concat([df1,df2])
pandas.concat
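A hedged sketch of pandas.concat as used in the completion above; the column names and values are made up, only the [df1, df2] stacking pattern comes from the source.
import pandas as pd

# pd.concat stacks DataFrames row-wise by default, which is what the
# completion does with the filtered hydrogen-bond table df1 and the
# type3-pair table df2. Column names below are invented for illustration.
df1 = pd.DataFrame({"Atomname_i": ["N6"], "Atomname_j": ["O4"], "k": [1.2]})
df2 = pd.DataFrame({"Atomname_i": ["N1"], "Atomname_j": ["N3"], "k": [0.8]})
df3 = pd.concat([df1, df2])                      # keeps original row indexes
print(df3)
print(pd.concat([df1, df2], ignore_index=True))  # renumbers rows 0..n-1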
import numpy as np import pandas as pd from scipy import optimize, special from sklearn import metrics def optimize_threshold_f1(outputs, labels): std = np.std(outputs) bounds = np.array([np.min(outputs), np.max(outputs)]) / std def fn(thresh): return -metrics.f1_score(labels, outputs >= std * thresh) res = optimize.minimize_scalar(fn, bounds=bounds) print("F1 score:", -res.fun, "Threshold:", res.x) return res.x def optimize_threshold_count(outputs, labels, target): std = np.std(outputs) bounds = np.array([np.min(outputs), np.max(outputs)]) / std song_len = len(outputs) * 512 / 44100 / 60 def fn(thresh): n_preds = np.sum(outputs >= std * thresh) return np.abs(n_preds - target * song_len) res = optimize.minimize_scalar(fn, bounds=bounds) print("Threshold:", res.x) return res.x def smooth_outputs(outputs, q=10): s =
pd.Series(outputs)
pandas.Series
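A small sketch around the pd.Series(outputs) call in smooth_outputs; everything past the constructor is an assumption, as noted in the comments.
import numpy as np
import pandas as pd

# Wrapping a NumPy array in a Series gives access to window operations such
# as a rolling mean, a common way to smooth a 1-D signal. The rolling step is
# an assumption about how smooth_outputs continues; only pd.Series(outputs)
# is from the source.
outputs = np.random.default_rng(0).random(40)
s = pd.Series(outputs)
smoothed = s.rolling(window=10, min_periods=1).mean()
print(smoothed.tail())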
import types from math import sqrt, log import numpy as np import pandas as pd from ....data.materials.CompositionEntry import CompositionEntry from ....data.materials.util.GCLPCalculator import GCLPCalculator class GCLPAttributeGenerator: """Class to compute features based on the T=0K ground state. Attributes ---------- GCLPCalculator : GCLPCalculator A GCLPCalculator instance. count_phases : bool Flag to include or exclude the number of phases at equilibrium. Notes ----- Features: 1. Formation energy. 2. Number of phases in equilibrium. 3. Distance from closest composition (i.e., ||x_i - x_{i,f}||_2 for each component i for phase f). 4. Average distance from all neighbors. 5. Quasi-entropy (sum x_i * ln(x_i) where x_i is fraction of phase). Certain values of the number of phases in equilibrium and "quasi-entropy" are only accessible to systems with larger number of elements. Useful if you do not want to consider the number of components in an alloy as a predictive variable. """ # Tool used to compute ground states. GCLPCalculator = None # Whether to include the number of phases at equilibrium. count_phases = True def set_phases(self, phases, energies): """Function to define phases used when computing ground states. Parameters ---------- phases : array-like Compositions to consider. A list of CompositionEntry's. energies : array-like Corresponding energies. A list of float values. """ self.GCLPCalculator = GCLPCalculator() self.GCLPCalculator.add_phases(phases, energies) def set_count_phases(self, count_phases): """Function to set variable to count number of phases at equilibrium. In some cases, you may want to exclude this as a feature because it is tied to the number of components in the compound. Parameters ---------- count_phases : bool Desired setting. """ self.count_phases = count_phases def generate_features(self, entries): """Function to generate features as mentioned in the class description. Parameters ---------- entries : array-like Compositions for which features are to be generated. A list of CompositionEntry's. Returns ---------- features : DataFrame Features for the given entries. Pandas data frame containing the names and values of the descriptors. Raises ------ ValueError If input is not of type list. If items in the list are not CompositionEntry instances. """ # Initialize lists of feature values and headers for pandas data frame. feat_values = [] feat_headers = [] # Raise exception if input argument is not of type list of # CompositionEntry's. if not isinstance(entries, list): raise ValueError("Argument should be of type list of " "CompositionEntry's") elif (entries and not isinstance(entries[0], CompositionEntry)): raise ValueError("Argument should be of type list of " "CompositionEntry's") # Check if the GCLP calculation has been defined. if not self.GCLPCalculator: raise ValueError("GCLP calculator has not been setup.") feat_headers.append("T0K:Enthalpy") if self.count_phases: feat_headers.append("T0K:NPhasesEquilibrium") feat_headers.append("T0K:ClosestPhaseDistance") feat_headers.append("T0K:MeanPhaseDistance") if self.count_phases: feat_headers.append("T0K:QuasiEntropy") for entry in entries: tmp_list = [] # Run GCLP. l,r = self.GCLPCalculator.run_GCLP(entry) # Compute formation energy. tmp_list.append(l) # Compute number of phases. if self.count_phases: tmp_list.append(len(r)) # Compute distances. 
phase_distances = [] elements = entry.get_element_ids() fractions = entry.get_element_fractions() for phase in r: dist = 0.0 for i,elem in enumerate(elements): diff = phase.get_element_fraction(id=elem) - fractions[i] dist += diff * diff phase_distances.append(sqrt(dist)) tmp_list.append(min(phase_distances)) tmp_list.append(np.mean(phase_distances)) # Compute quasi-entropy. if self.count_phases: entropy = 0.0 for f in r.values(): entropy += f * log(f) tmp_list.append(entropy) feat_values.append(tmp_list) features =
pd.DataFrame(feat_values, columns=feat_headers)
pandas.DataFrame
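A minimal sketch of the pd.DataFrame call that closes generate_features; the header names are taken from the class above, while the numeric values are placeholders.
import pandas as pd

# Building a DataFrame from a list of per-entry rows plus a matching list of
# column names, mirroring the feat_values / feat_headers pattern above.
# The numbers are placeholders, not real GCLP output.
feat_headers = ["T0K:Enthalpy", "T0K:ClosestPhaseDistance", "T0K:MeanPhaseDistance"]
feat_values = [[-0.31, 0.05, 0.12],
               [-0.12, 0.00, 0.08]]
features = pd.DataFrame(feat_values, columns=feat_headers)
print(features)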
import numpy as np import pandas as pd from scipy.stats import norm, percentileofscore from tqdm.notebook import tqdm def rv_cc_estimator(sample,n=22): """ Realized volatility close to close calculation. Returns a time series of the realized volatility. sample: series or dataframe of closing prices indexed by date n: sample size period for the volatility """ sample_clean = sample.dropna() returns = np.divide(sample_clean, sample_clean.shift(1)) log_returns = np.log(returns) ann_log_returns = 252*np.power(log_returns,2)/n return 100 * np.sqrt(ann_log_returns.rolling(window=n,min_periods=n).sum()) def cc_estimator(sample,n=22,days=1): combined_rv =
pd.Series()
pandas.Series
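A hedged sketch of the empty-Series accumulator that cc_estimator appears to build; the dtype argument and the fill loop are assumptions, only pd.Series() itself is from the completion.
import pandas as pd

# cc_estimator starts from an empty Series and fills it per iteration.
# Recent pandas versions warn on a bare pd.Series(); passing an explicit
# dtype keeps the same behaviour without the warning. The loop below is an
# invented stand-in for the real accumulation logic.
combined_rv = pd.Series(dtype=float)
for day, value in [("2021-01-04", 18.2), ("2021-01-05", 17.9)]:
    combined_rv.loc[pd.Timestamp(day)] = value
print(combined_rv)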
import multiprocessing as mp import os import tempfile import shutil import dask.dataframe as dd import dask.diagnostics import genomepy from gimmemotifs.scanner import scan_regionfile_to_table from gimmemotifs.utils import pfmfile_location from loguru import logger import numpy as np import pandas as pd import pickle import pysam import qnorm from scipy import stats from sklearn.preprocessing import minmax_scale from ananse.utils import ( bed_sort, bed_merge, bam_index, bam_sort, mosdepth, ) from ananse.distributions import Distributions class CombineBedFiles: def __init__(self, genome, peakfiles, verbose=True): self.genome = genome self.list_of_peakfiles = ( peakfiles if isinstance(peakfiles, list) else [peakfiles] ) self.verbose = verbose @staticmethod def is_narrowpeak(bed, check_values=True): """ Check BED type by column count. Check if peak values are not all zeroes unless check_values is False. Accepts a BED file (including narrowPeak, broadPeak, etc.) Returns bool """ with open(bed) as b: for line in b: if line.startswith("#"): continue line = line.split("\t") cols = len(line) break # narrowPeak has 10 columns # and the peak column is >= 0 if cols != 10 or int(line[9]) < 0: return False if not check_values: return True # check if the peak values aren't all zeroes summit_values = 0 sample_size = 20 # check an arbitrary number of lines with open(bed) as b: for n, line in enumerate(b): if line.startswith("#"): continue line = line.split("\t") peak_val = int(line[9]) # value must be >=0 if peak_val < 0: return False summit_values += peak_val if n >= sample_size: break if summit_values > 0: return True return False @staticmethod def bed_resize( genome, bed_in, bed_out, width=200, narrowpeak=False, fix_outliers=False, output_bed3=True, verbose=True, ): """ Set bed region width. If the input bed is a narrowPeak file (narrowpeak=True), center region on the summit (start+peak). Otherwise center on the middle of the region. If fix_outliers is set to True, shift regions to fit their chromosomes. Otherwise drop these regions. If output_bed3 is set to False, output the whole bed file. 
""" half_seqlen = width // 2 chrom_sizes = genomepy.Genome(genome).sizes missing_chrm = [] if narrowpeak: def get_summit(_start, _, summit_offset): return _start + int(summit_offset) summit_col = 9 else: def get_summit(_start, _end, _): return (_start + _end) // 2 summit_col = 0 # unused with open(bed_in) as old, open(bed_out, "w") as new: for line in old: if line.startswith("#"): continue line = line.split("\t") chrm = str(line[0]) if chrm not in chrom_sizes.keys(): missing_chrm.append(chrm) continue start = int(line[1]) end = int(line[2]) rest = line[3:] if not output_bed3 else [] chrm_len = chrom_sizes[chrm] if width == end - start: nstart = str(start) nend = str(end) elif chrm_len <= width: if not fix_outliers: continue nstart = str(0) nend = str(chrm_len) else: summit = get_summit(start, end, line[summit_col]) if not fix_outliers: nstart = str(summit - half_seqlen) nend = str(summit + half_seqlen) if int(nstart) < 0 or int(nend) > chrm_len: continue else: # adjust the summit for the chromosome boundaries summit = max(summit, 0 + half_seqlen) summit = min(summit, chrm_len - half_seqlen) nstart = str(summit - half_seqlen) nend = str(summit + half_seqlen) new.write("\t".join([chrm, nstart, nend] + rest) + "\n") if missing_chrm and verbose: logger.warning( "The following contigs were present in " + f"'{os.path.basename(bed_in)}', " + "but were missing in the genome file: " + f"{', '.join(list(set(missing_chrm)))}\n" ) return bed_out def run(self, outfile, width=200, force=False): if force or not os.path.exists(outfile): if self.verbose: logger.info("Combining bed files") tmpdir = tempfile.mkdtemp(prefix="ANANSE_") try: list_of_beds = [] for peakfile in self.list_of_peakfiles: # use narrowPeak Peak location for region centering if possible is_np = self.is_narrowpeak(peakfile) resized_peakfile = os.path.join(tmpdir, os.path.basename(peakfile)) # resize each BED region to 200 BP self.bed_resize( genome=self.genome, bed_in=peakfile, bed_out=resized_peakfile, width=width, narrowpeak=is_np, verbose=self.verbose, ) bed_sort(resized_peakfile) list_of_beds.append(resized_peakfile) # merge resized beds into one merged_bed = os.path.join(tmpdir, "merged") bed_merge(list_of_beds=list_of_beds, merged_bed=merged_bed) shutil.copy2(merged_bed, outfile) finally: shutil.rmtree(tmpdir, ignore_errors=True) class ScorePeaks: def __init__(self, bams, bed, ncore=1, verbose=True): self.list_of_bams = bams if isinstance(bams, list) else [bams] self.bed = bed # one bed file with all putative enhancer binding regions self.verbose = verbose self.ncore = ncore def compatibility_check(self): """ Check if any chromosome in each bams file are found in the bed file. This filters out datasets mapped to different genomes. 
""" error = False bed_chromosomes = set( pd.read_csv(self.bed, sep="\t", header=None)[0].astype(str) ) for bam in self.list_of_bams: bam_header = pysam.view(bam, "-H").split("\n") # noqa: pysam bug for line in bam_header: if not line.startswith("@SQ"): continue # extract chrom (ex: '@SQ\tSN:chr11\tLN:100316') chrom = line.split("\tSN:")[1].split("\tLN:")[0] # if any chrom matches: next bam if chrom in bed_chromosomes: break else: logger.exception( f"Chromosomes in the peak file(s) do not match any in bam file '{os.path.basename(bam)}'!\n" f"Does {self.bed} contain any regions, and " "are both bam- and peak file(s) mapped to the same genome assembly?\n" ) error = True if error: exit(1) def peaks_count(self, outdir): """ count bam reads in the bed regions returns one bed file for each bam in outdir """ # linear script: # coverage_files = [] # for bam in self.list_of_bams: # bed_output = os.path.join(outdir, os.path.basename(bam).replace(".bam", ".regions.bed")) # coverage_files.append(bed_output) # mosdepth(self.bed, bam, bed_output, self.ncore) # return coverage_files # parallel script: nbams = len(self.list_of_bams) npool = min(self.ncore, nbams) ncore = min(4, self.ncore // npool) # 1-4 cores/bam # list with tuples. each tuple = one run mosdepth_params = [] coverage_files = [] for bam in self.list_of_bams: bed_output = os.path.join( outdir, os.path.basename(bam).replace(".bam", ".regions.bed") ) mosdepth_params.append((self.bed, bam, bed_output, ncore)) coverage_files.append(bed_output) pool = mp.Pool(npool) try: pool.starmap_async(mosdepth, mosdepth_params) finally: # To make sure processes are closed in the end, even if errors happen pool.close() pool.join() return coverage_files @staticmethod def peaks_merge(coverage_files, bed_output, ncore=1): """ averages all peaks_count outputs uses quantile normalization to normalize for read depth returns one BED 3+1 file """ ncore = min(4, ncore) bed = pd.read_csv(coverage_files[0], header=None, sep="\t") if len(coverage_files) > 1: for file in coverage_files[1:]: scores = pd.read_csv(file, header=None, sep="\t")[3] bed = pd.concat([bed, scores], axis=1) scores = bed.iloc[:, 3:] scores = qnorm.quantile_normalize(scores, axis=1, ncpus=ncore) scores = scores.mean(axis=1) bed = pd.concat([bed.iloc[:, :3], scores], axis=1) bed.to_csv(bed_output, sep="\t", header=False, index=False) @staticmethod def peaks_fit(bam_coverage, bed_output, dist_func="lognorm_dist", **kwargs): """ fit the peak scores to a distribution """ bed = pd.read_csv(bam_coverage, header=None, sep="\t") region = ( bed[0].astype(str) + ":" + bed[1].astype(str) + "-" + bed[2].astype(str) ) score = bed[3] # obtain a distribution dist_func = Distributions().set(dist_func) # with np.errstate(divide="ignore", invalid="ignore"): # dist = dist_func(score, **kwargs) dist = dist_func(score, **kwargs) # replace scores with distribution values ascending_dist = np.sort(dist) ascending_scores_index = np.searchsorted(np.sort(score), score) norm_score = np.array([ascending_dist[i] for i in ascending_scores_index]) logn_score = np.log(norm_score + 1) scaled_score = minmax_scale(logn_score) log10_score = np.log10(norm_score + 1) data = { "region": region, # ex: "chr1:0-200" "score": score, "norm_score": norm_score, "logn_score": logn_score, "scaled_score": scaled_score, "log10_score": log10_score, # used by the original function } bed = pd.DataFrame(data=data) bed.to_csv(bed_output, sep="\t", index=False) def run(self, outfile, dist_func="peak_rank_file_dist", force=False, **kwargs): # save the 
results as it takes ages to run raw_peak_scores = os.path.join(os.path.dirname(outfile), "raw_scoredpeaks.bed") if force or not os.path.exists(raw_peak_scores): self.compatibility_check() tmpdir = tempfile.mkdtemp(prefix="ANANSE_") try: if self.verbose: logger.info("Scoring peaks (slow)") try: # assumes sorted for bam in self.list_of_bams: bam_index(bam, force=False, ncore=self.ncore) coverage_files = self.peaks_count(tmpdir) except Exception: # sort, index & try again for bam in self.list_of_bams: bam_sort(bam, self.ncore) coverage_files = self.peaks_count(tmpdir) tmp_peak_scores = os.path.join(tmpdir, "raw_scoredpeaks.bed") self.peaks_merge(coverage_files, tmp_peak_scores, self.ncore) shutil.copy2(tmp_peak_scores, raw_peak_scores) finally: shutil.rmtree(tmpdir, ignore_errors=True) # fit bam read counts to specified distribution if force or not os.path.exists(outfile): self.peaks_fit(raw_peak_scores, outfile, dist_func=dist_func, **kwargs) class ScoreMotifs: def __init__(self, genome, bed, pfmfile=None, ncore=1, verbose=True): self.genome = genome self.bed = bed # putative enhancer regions in format chr:start-end (in column 0 with header) self.pfm_file = pfmfile_location(pfmfile) self.ncore = ncore self.verbose = verbose def motifs_get_scores(self, pfmscorefile, debug=False): """ Scan for TF binding motifs in potential enhancer regions. """ if not debug: df = scan_regionfile_to_table( input_table=self.bed, genome=self.genome, scoring="score", pfmfile=self.pfm_file, ncpus=self.ncore, zscore=True, gc=True, ) else: # test output df = pd.DataFrame( { "region": ["chr1:400-600", "chr1:2400-2600", "chr1:10003-10203"], "GM.5.0.Sox.0001": [-0.544, -2.496, -0.544], "GM.5.0.Homeodomain.0001": [-0.750, -0.377, -7.544], } ).set_index("region") df["motif"] = df.idxmax(axis=1) df["zscore"] = df.max(axis=1) df.reset_index(inplace=True) df.to_csv( pfmscorefile, sep="\t", header=True, index=False, columns=["motif", "region", "zscore"], # filter + order columns ) @staticmethod def motifs_normalize(bed_input, bed_output): """ Add normalized scores to the scored motifs """ bed =
pd.read_csv(bed_input, sep="\t")
pandas.read_csv
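A self-contained sketch of the pd.read_csv call in motifs_normalize, using an in-memory TSV instead of a file so it runs as-is; the single data row is invented but the columns mirror what motifs_get_scores writes.
import io

import pandas as pd

# Reading a tab-separated table with a header row, as motifs_normalize does
# with the scored-motif file. The columns match the (motif, region, zscore)
# layout written by motifs_get_scores above.
tsv = "motif\tregion\tzscore\nGM.5.0.Sox.0001\tchr1:400-600\t-0.544\n"
bed = pd.read_csv(io.StringIO(tsv), sep="\t")
print(bed.dtypes)
print(bed)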
#!/usr/bin/env python # coding: utf-8 # # Unit 5 - Financial Planning # # In[1]: # Initial imports import os import requests import pandas as pd from dotenv import load_dotenv import alpaca_trade_api as tradeapi from MCForecastTools import MCSimulation get_ipython().run_line_magic('matplotlib', 'inline') # In[2]: # Load .env enviroment variables load_dotenv() # ## Part 1 - Personal Finance Planner # ### Collect Crypto Prices Using the `requests` Library # In[3]: # Set current amount of crypto assets my_btc = 1.2 my_eth = 5.3 # In[4]: # Crypto API URLs btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=CAD" eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=CAD" # In[7]: def get_crypto_data(url): headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"} response = requests.get(url,headers=headers) return response.json()["data"] # In[12]: btc_data = get_crypto_data(btc_url) btc_data # In[15]: eth_data = get_crypto_data(eth_url) eth_data # In[18]: # Fetch current BTC price btc_price = btc_data['1']['quotes']['USD']['price'] # Fetch current ETH price eth_price = eth_data['1027']['quotes']['USD']['price'] # Compute current value of my crpto my_btc_value = btc_price * my_btc my_eth_value = eth_price * my_eth # Print current crypto wallet balance print(f"The current value of your {my_btc} BTC is ${my_btc_value:0.2f}") print(f"The current value of your {my_eth} ETH is ${my_eth_value:0.2f}") # ### Collect Investments Data Using Alpaca: `SPY` (stocks) and `AGG` (bonds) # In[19]: # Current amount of shares my_agg = 200 my_spy = 50 # In[23]: # Set Alpaca API key and secret alpaca_api_key = os.getenv("ALPACA_API_KEY") alpaca_secret_key = os.getenv("ALPACA_API_SECRET") # Create the Alpaca API object alpaca = tradeapi.REST(alpaca_api_key,alpaca_secret_key,api_version="v2") # In[24]: # Format current date as ISO format today = pd.Timestamp("2021-05-14", tz="America/New_York").isoformat() # Set the tickers tickers = ["AGG", "SPY"] # Set timeframe to '1D' for Alpaca API timeframe = "1D" # Get current closing prices for SPY and AGG df_portfolio = alpaca.get_barset( tickers, timeframe, start = today, end = today ).df # Preview DataFrame df_portfolio.head() # In[25]: # Pick AGG and SPY close prices agg_close_price = float(df_portfolio["AGG"]["close"]) spy_close_price = float(df_portfolio["SPY"]["close"]) # Print AGG and SPY close prices print(f"Current AGG closing price: ${agg_close_price}") print(f"Current SPY closing price: ${spy_close_price}") # In[26]: # Compute the current value of shares my_spy_value = spy_close_price * my_spy my_agg_value = agg_close_price * my_agg # Print current value of share print(f"The current value of your {my_spy} SPY shares is ${my_spy_value:0.2f}") print(f"The current value of your {my_agg} AGG shares is ${my_agg_value:0.2f}") # ### Savings Health Analysis # In[35]: # Set monthly household income monthly_income = 12000 # Create savings DataFrame total_crypto = my_btc_value + my_eth_value total_shares = my_spy_value + my_agg_value value_data = { "amount": { "crypto": total_crypto, "shares": total_shares } } df_savings = pd.DataFrame(value_data) df_savings # Display savings DataFrame display(df_savings) # In[36]: # Plot savings pie chart df_savings.plot.pie(y="amount", title="Composition of Personal Saving") # In[37]: # Set ideal emergency fund emergency_fund = monthly_income * 3 # Calculate total amount of savings total_savings = total_crypto + total_shares # 
Validate saving health if (total_savings > emergency_fund): print("Congratulations! You have enough money in this fund") elif (total_savings == emergency_fund): print("Congratulations on reaching this financial goal!") else: diff = emergency_fund - total_savings print(f"You are ${diff} away from reaching your financial goal") # ## Part 2 - Retirement Planning # # ### Monte Carlo Simulation # In[42]: # Set start and end dates of five years back from today. # Sample results may vary from the solution based on the time frame chosen start_date =
pd.Timestamp('2015-05-14', tz='America/New_York')
pandas.Timestamp
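A brief sketch of the timezone-aware pd.Timestamp from the completion above; the paired end date is an assumption added for illustration.
import pandas as pd

# A tz-aware Timestamp, matching the start_date the notebook builds for its
# five-year Alpaca window. Earlier cells call .isoformat() before passing
# dates to get_barset, so the same step is shown here; end_date is assumed.
start_date = pd.Timestamp("2015-05-14", tz="America/New_York")
end_date = pd.Timestamp("2020-05-14", tz="America/New_York")
print(start_date.isoformat())         # 2015-05-14T00:00:00-04:00
print((end_date - start_date).days)   # length of the window in days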
"""All functions that are not so useful, but still useful.""" from collections import Counter from collections import OrderedDict from collections import defaultdict import errno import itertools import math import os import re import sys import ntpath import pickle import subprocess from scipy import stats import numpy as np import pandas as pd import six import pybedtools import pysam import pyBigWig from bx.intervals.intersection import IntervalTree import warnings from .interval import Interval # Unmapped, Unmapped+Reverse strand, Not primary alignment, # Not primary alignment + reverse strand, supplementary alignment # Source: https://broadinstitute.github.io/picard/explain-flags.html __SAM_NOT_UNIQ_FLAGS__ = [4, 20, 256, 272, 2048] CBB_PALETTE = [ "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", ] def order_dataframe(df, columns): """Order a dataframe Order a dataframe by moving the `columns` in the front Parameters ---------- df: Dataframe Dataframe columns: list List of columns that need to be put in front """ if isinstance(columns, six.string_types): columns = [columns] # let the command take a string or list remaining_columns = [w for w in df.columns if w not in columns] df = df[columns + remaining_columns] return df def _fix_bed_coltype(bed): """Fix bed chrom and name columns to be string This is necessary since the chromosome numbers are often interpreted as int """ bed["chrom"] = bed["chrom"].astype(str) bed["name"] = bed["name"].astype(str) return bed def check_file_exists(filepath): """Check if file exists. Parameters ---------- filepath : str Path to file """ if os.path.isfile(os.path.abspath(filepath)): return True return False def list_to_ranges(list_of_int): """Convert a list to a list of range object Parameters ---------- list_of_int: list List of integers to be squeezed into range Returns ------- list_of_range: list List of range objects """ sorted_list = sorted(set(list_of_int)) for key, group in itertools.groupby(enumerate(sorted_list), lambda x: x[1] - x[0]): group = list(group) yield group[0][1], group[-1][1] def create_ideal_periodic_signal(signal_length): """Create ideal ribo-seq signal. Parameters ---------- signal_length : int Length of signal to create Returns ------- signal : array_like 1-0-0 signal """ uniform_signal = np.array([4 / 6.0] * signal_length) uniform_signal[list(range(1, len(uniform_signal), 3))] = 1 / 6.0 uniform_signal[list(range(2, len(uniform_signal), 3))] = 1 / 6.0 return uniform_signal def identify_peaks(coverage): """Given coverage array, find the site of maximum density""" return np.argmax(coverage[list(range(-18, -10))]) def millify(n): """Convert integer to human readable format. 
Parameters ---------- n : int Returns ------- millidx : str Formatted integer """ if n is None or np.isnan(n): return "NaN" millnames = ["", " K", " M", " B", " T"] # Source: http://stackoverflow.com/a/3155023/756986 n = float(n) millidx = max( 0, min( len(millnames) - 1, int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)) ), ) return "{:.1f}{}".format(n / 10 ** (3 * millidx), millnames[millidx]) def mkdir_p(path): """Python version mkdir -p Parameters ---------- path : str """ if path: try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def symlink_force(source, destination): """Create forcelink forcefully Parameters ---------- source: string Location to source file destination: string Location to target """ try: os.symlink(source, destination) except OSError as exc: if exc.errno == errno.EEXIST: os.remove(destination) os.symlink(source, destination) else: raise exc def r2(x, y): """Calculate pearson correlation between two vectors. Parameters ---------- x : array_like Input y : array_like Input """ return stats.pearsonr(x, y)[0] ** 2 def round_to_nearest(x, base=5): """Round to nearest base. Parameters ---------- x : float Input Returns ------- v : int Output """ return int(base * round(float(x) / base)) def set_xrotation(ax, degrees): """Rotate labels on x-axis. Parameters ---------- ax : matplotlib.Axes Axes object degrees : int Rotation degrees """ for i in ax.get_xticklabels(): i.set_rotation(degrees) def summary_stats_two_arrays_welch( old_mean_array, new_array, old_var_array=None, old_n_counter=None, carried_forward_observations=None, ): """Average two arrays using welch's method Parameters ---------- old_mean_array : Series Series of previous means with index as positions old_var_array : Series Series of previous variances with index as positions new_array : array like Series of new observations (Does noes Ciunts of number of positions at a certain index Returns ------- m : array like Column wise Mean array var : array like Column wise variance Consider an example: [1,2,3], [1,2,3,4], [1,2,3,4,5] old = [1,2,3] new = [1,2,3,4] counter = [1,1,1] mean = [1,2,3,4] Var =[na, na, na, na], carried_fowrad = [[1,1], [2,2], [3,3], [4]] old = [1,2,3,4] new = [1,2,3,4,5] couter = [2,2,2,1] mean = [1,2,3,4,5] var = [0,0,0, na, na] carried_forward = [[], [], [], [4,4], [5]] """ if not isinstance(old_mean_array, pd.Series): old_mean_array = pd.Series(old_mean_array) if not isinstance(new_array, pd.Series): new_array = pd.Series(new_array) if old_n_counter is not None and not isinstance(old_n_counter, pd.Series): old_n_counter = pd.Series(old_n_counter) len_old, len_new = len(old_mean_array), len(new_array) if old_n_counter is None: # Initlaized from current series old_n_counter = pd.Series( np.zeros(len(old_mean_array)) + 1, index=old_mean_array.index ) if old_var_array is None: # Initlaized from current series old_var_array = pd.Series( np.zeros(len(old_mean_array)) + np.nan, index=old_mean_array.index ) # Update positions counts based on new_array new_n_counter = old_n_counter.add( pd.Series(np.zeros(len(new_array)) + 1, index=new_array.index), fill_value=0 ) if len_old > len_new: len_diff = len_old - len_new # Pad the incoming array # We append NAs to the end of new_array since it will mostly be in the metagene context max_index = np.max(new_array.index.tolist()) new_index = np.arange(max_index + 1, max_index + 1 + len_diff) new_array = new_array.append( pd.Series(np.zeros(len_diff) + np.nan, index=new_index), 
verify_integrity=True, ) elif len_old < len_new: len_diff = len_new - len_old # Pad the old array if len_old == 0: old_mean_array = pd.Series([]) else: max_index = np.max(old_mean_array.index.tolist()) new_index = np.arange(max_index + 1, max_index + 1 + len_diff) old_mean_array = old_mean_array.append( pd.Series(np.zeros(len_diff) + np.nan, index=new_index), verify_integrity=True, ) if not (old_mean_array.index == new_array.index).all(): print("old array index: {}".format(old_mean_array)) print("new array index: {}".format(new_array)) positions_with_less_than3_obs = defaultdict(list) for index, counts in six.iteritems(new_n_counter): # Which positions has <3 counts for calculating variance if counts <= 3: # Fetch the exact observations from history try: last_observations = carried_forward_observations[index] except: # No carreid forward passed if not np.isnan(old_mean_array[index]): last_observations = [old_mean_array[index]] else: last_observations = [] # Add entry from new_array only if it is not NAN if not np.isnan(new_array[index]): last_observations.append(new_array[index]) positions_with_less_than3_obs[index] = last_observations # positions_with_less_than3_obs = pd.Series(positions_with_less_than3_obs) # delta = x_n - mean(x_{n-1}) delta = new_array.subtract(old_mean_array) """ for index, value in six.iteritems( delta ): if np.isnan(value): if not np.isnan(old_mean_array[index]): delta[index] = old_mean_array[index] else: delta[index] = new_array[index] """ # delta = delta/n delta_normalized = delta.divide(new_n_counter) # mean(x_n) = mean(x_{n-1}) + delta/n new_mean_array = old_mean_array.add(delta_normalized) for index, value in six.iteritems(new_mean_array): if np.isnan(value): if not np.isnan(old_mean_array[index]): new_mean_array[index] = old_mean_array[index] else: new_mean_array[index] = new_array[index] # print(delta) # print(new_n_counter) # print(delta_normalized) # print(new_mean_array) # mean_difference_current = x_n - mean(x_n) # mean_difference_previous = x_n - mean(x_{n-1}) mean_difference_current = new_array.fillna(0) - new_mean_array.fillna(0) mean_difference_previous = new_array.fillna(0) - old_mean_array.fillna(0) # (x_n-mean(x_n))(x_n-mean(x_{n-1}) product = np.multiply(mean_difference_current, mean_difference_previous) # (n-1)S_n^2 - (n-2)S_{n-1}^2 = (x_n-mean(x_n)) (x_n-mean(x_{n-1})) # old_ssq = (n-1)S_{n-1}^2 # (n-2)S_{n-1}^2 old_sum_of_sq = (old_n_counter - 2).multiply(old_var_array.fillna(0)) # new_ssq = (old_ssq + product) # (n-1) S_n^2 new_sum_of_sq = old_sum_of_sq + product # if counts is less than 3, set sum of sq to NA new_sum_of_sq[new_n_counter < 3] = np.nan # if counts just became 3, compute the variance for index, counts in six.iteritems(new_n_counter): if counts == 3: observations = positions_with_less_than3_obs[index] variance = np.var(observations) print(index, variance) new_sum_of_sq[index] = variance # delete it from the history del positions_with_less_than3_obs[index] new_var_array = new_sum_of_sq.divide(new_n_counter - 1) new_var_array[new_var_array == np.inf] = np.nan new_var_array[new_n_counter < 3] = np.nan """ for index, counts in six.iteritems(new_n_counter): if counts < 3: if not np.isnan(new_array[index]): if index not in list(positions_with_less_than3_obs.keys()): positions_with_less_than3_obs[index] = list() assert index in positions_with_less_than3_obs.keys() positions_with_less_than3_obs[index].append(new_array[index]) """ return new_mean_array, new_var_array, new_n_counter, positions_with_less_than3_obs def path_leaf(path): 
"""Get path's tail from a filepath""" head, tail = ntpath.split(path) return tail or ntpath.basename(head) def parse_star_logs(infile, outfile=None): """Parse star logs into a dict Parameters ---------- infile : str Path to starlogs.final.out file Returns ------- star_info : dict Dict with necessary records parsed """ ANNOTATIONS = [ "total_reads", "uniquely_mapped", "uniquely_mapped_percent", "multi_mapped_percent", "unmapped_percent", "multi_mapped", ] star_info = OrderedDict() with open(infile) as fh: for line in fh: line = line.strip() if line.startswith("Number of input reads"): star_info[ANNOTATIONS[0]] = int(line.strip().split("\t")[1]) elif line.startswith("Uniquely mapped reads number"): star_info[ANNOTATIONS[1]] = int(line.strip().split("\t")[1]) elif line.startswith("Uniquely mapped reads %"): star_info[ANNOTATIONS[2]] = round( float(line.strip("%").split("\t")[1]), 2 ) elif line.startswith("Number of reads mapped to multiple loci"): star_info[ANNOTATIONS[5]] = int(line.strip().split("\t")[1]) elif line.startswith("Number of reads mapped to too many loci"): star_info[ANNOTATIONS[5]] += int(line.strip().split("\t")[1]) elif line.startswith("% of reads mapped to multiple loci"): star_info[ANNOTATIONS[3]] = round( float(line.strip("%").split("\t")[1]), 2 ) elif line.startswith("% of reads mapped to too many loci"): star_info[ANNOTATIONS[3]] += round( float(line.strip("%").split("\t")[1]), 2 ) elif line.startswith("% of reads unmapped: too many mismatches"): star_info[ANNOTATIONS[4]] = round( float(line.strip("%").split("\t")[1]), 2 ) elif line.startswith("% of reads unmapped: too short"): star_info[ANNOTATIONS[4]] += round( float(line.strip("%").split("\t")[1]), 2 ) elif line.startswith("% of reads unmapped: other"): star_info[ANNOTATIONS[4]] += round( float(line.strip("%").split("\t")[1]), 2 ) star_info = {key: round(star_info[key], 2) for key in list(star_info.keys())} if outfile is None: return star_info filename = path_leaf(infile) filename = filename.strip("Log.final.out") counts_df = pd.DataFrame.from_dict(star_info, orient="index").T counts_df.index = [filename] if outfile: counts_df.to_csv(outfile, sep=str("\t"), index=True, header=True) return counts_df def get_strandedness(filepath): """Parse output of infer_experiment.py from RSeqC to get strandedness. 
Parameters ---------- filepath : str Path to infer_experiment.py output Returns ------- strandedness : str reverse or forward or none """ with open(filepath) as f: data = f.read() splitted = [x.strip() for x in data.split("\n") if len(x.strip()) >= 1] assert splitted[0] == "This is SingleEnd Data" fwd_percentage = None rev_percentage = None for line in splitted[1:]: if "Fraction of reads failed to determine:" in line: continue elif 'Fraction of reads explained by "++,--":' in line: fwd_percentage = float(line.split(":")[1]) elif 'Fraction of reads explained by "+-,-+":' in line: rev_percentage = float(line.split(":")[1]) assert rev_percentage is not None assert fwd_percentage is not None ratio = fwd_percentage / rev_percentage if np.isclose([ratio], [1]): return "none" elif ratio >= 0.5: return "forward" else: return "reverse" def load_pickle(filepath): """Read pickled files easy in Python 2/3""" if ".tsv" in filepath: raise IndexError if sys.version_info > (3, 0): pickled = pickle.load(open(filepath, "rb"), encoding="latin1") else: pickled = pickle.load(open(filepath, "rb")) return pickled def pad_or_truncate(some_list, target_len): """Pad or truncate a list upto given target length Parameters ---------- some_list : list Input list target_length : int Final length of list If being extended, returns list padded with NAs. """ return some_list[:target_len] + [np.nan] * (target_len - len(some_list)) def pad_five_prime_or_truncate(some_list, offset_5p, target_len): """Pad first the 5prime end and then the 3prime end or truncate Parameters ---------- some_list : list Input list offset_5p : int 5' offset target_length : int Final length of list If being extended, returns list padded with NAs. """ some_list = list(some_list) padded_5p = [np.nan] * offset_5p + some_list return padded_5p[:target_len] + [np.nan] * (target_len - len(padded_5p)) def codon_to_anticodon(codon): """Codon to anticodon. Parameters ---------- codon : string Input codon """ pairs = {"A": "T", "C": "G", "T": "A", "G": "C", "N": "N"} return "".join(pairs[c] for c in codon)[::-1] def merge_intervals( intervals, chromosome_lengths=None, offset_5p=0, offset_3p=0, zero_based=True ): """Collapse intervals into non overlapping manner Parameters ---------- intervals : list of Interval chromosome_lengths : dict A map of each chromosome'e length Only used with offset_3p, offset_5p>0 offset_5p : int (positive) Number of bases to count upstream (5') offset_3p : int (positive) Number of bases to count downstream (3') zero_based: bool Indicate if the intervals are zero-based True means zero-based half open False means one-based full closed Returns ------- interval_combined : list of Interval sorted by the start A merged version of intervals This is useful when the annotations are overlapping. 
Example: chr1 310 320 gene1 + chr1 319 324 gene1 + Returns: chr1 310 324 gene1 + gene_offset_5p: Gene wise 5 prime offset This might be different from `offset_5p` in cases where `offset_5p` leads to a negative coordinate gene_offset_3p: Gene wise 3 prime offset This might be different from `offset_3p` in cases where `offset_3p` leads to position beyond chromsome length """ if not intervals: return ([], offset_5p, offset_3p) chroms = list(set([i.chrom for i in intervals])) strands = list(set([i.strand for i in intervals])) if len(chroms) != 1: sys.stderr.write("Error: chromosomes should be unique") return ([], offset_5p, offset_3p) if len(strands) != 1: sys.stderr.write("Error: strands should be unique") return ([], offset_5p, offset_3p) chrom = chroms[0] strand = strands[0] # Sort intervals by start intervals.sort(key=lambda x: x.start) # Find first interval first_interval = intervals[0] # Find last interval last_interval = intervals[-1] for i in intervals: if i.end > last_interval.end: last_interval = i if offset_5p != 0 or offset_3p != 0: if str(chrom) in chromosome_lengths: chrom_length = chromosome_lengths[str(chrom)] else: warnings.warn("Chromosome {} does not exist".format(chrom), UserWarning) chrom_length = np.inf else: chrom_length = np.inf if zero_based: lower_bound = 0 else: lower_bound = 1 upper_bound = chrom_length if strand == "+": if first_interval.start - offset_5p >= lower_bound: first_interval.start -= offset_5p gene_offset_5p = offset_5p else: gene_offset_5p = first_interval.start - lower_bound first_interval.start = lower_bound if last_interval.end + offset_3p <= upper_bound: last_interval.end += offset_3p gene_offset_3p = offset_3p else: gene_offset_3p = upper_bound - last_interval.end last_interval.end = upper_bound else: if last_interval.end + offset_5p <= upper_bound: last_interval.end += offset_5p gene_offset_5p = offset_5p else: gene_offset_5p = upper_bound - last_interval.end last_interval.end = upper_bound if first_interval.start - offset_3p >= lower_bound: first_interval.start -= offset_3p gene_offset_3p = offset_3p else: gene_offset_3p = first_interval.start - lower_bound first_interval.start = lower_bound # Merge overlapping intervals to_merge = Interval(chrom, first_interval.start, first_interval.end, strand) intervals_combined = [] for i in intervals: if i.start <= to_merge.end: to_merge.end = max(to_merge.end, i.end) else: intervals_combined.append(to_merge) to_merge = Interval(chrom, i.start, i.end, strand) intervals_combined.append(to_merge) return (intervals_combined, gene_offset_5p, gene_offset_3p) def summarize_counters(samplewise_dict): """Summarize gene counts for a collection of samples. Parameters ---------- samplewise_dict : dict A dictionary with key as sample name and value as another dictionary of counts for each gene Returns ------- totals : dict A dictionary with key as sample name and value as total gene count """ totals = {} for key, sample_dict in six.iteritems(samplewise_dict): totals[key] = np.nansum([np.nansum(d) for d in list(sample_dict.values)]) return totals def complementary_strand(strand): """Get complementary strand Parameters ---------- strand: string +/- Returns ------- rs: string -/+ """ if strand == "+": return "-" elif strand == "-": return "+" else: raise ValueError("Not a valid strand: {}".format(strand)) def read_refseq_bed(filepath): """Read refseq bed12 from UCSC. 
Parameters
    ----------
    filepath: string
              Location to bed12

    Returns
    -------
    refseq: dict
            dict with keys as chromosome name and values as intervaltree
    """
    refseq = defaultdict(IntervalTree)
    with open(filepath, "r") as fh:
        for line in fh:
            line = line.strip()
            if line.startswith(("#", "track", "browser")):
                continue
            fields = line.split("\t")
            chrom, tx_start, tx_end, name, score, strand = fields[:6]
            tx_start = int(tx_start)
            tx_end = int(tx_end)
            refseq[chrom].insert(tx_start, tx_end, strand)
    return refseq


def read_bed_as_intervaltree(filepath):
    """Read bed as interval tree

    Useful for reading start/stop codon beds

    Parameters
    ----------
    filepath: string
              Location to bed

    Returns
    -------
    bedint_tree: dict
                 dict with keys as chromosome name and values as intervaltree (storing strand)
    """
    bed_df = pybedtools.BedTool(filepath).sort().to_dataframe()
    bed_df["chrom"] = bed_df["chrom"].astype(str)
    bed_df["name"] = bed_df["name"].astype(str)
    bed_grouped = bed_df.groupby("chrom")

    bedint_tree = defaultdict(IntervalTree)
    for chrom, df in bed_grouped:
        df_list = list(zip(df["start"], df["end"], df["strand"]))
        for start, end, strand in df_list:
            bedint_tree[chrom].insert(start, end, strand)
    return bedint_tree


def read_chrom_sizes(filepath):
    """Read chr.sizes file sorted by chromosome name

    Parameters
    ----------
    filepath: string
              Location to chr.sizes

    Returns
    -------
    chrom_lengths: list of tuple
                   A list of tuples with chromosome name and their size
    """
    chrom_lengths = []
    with open(filepath, "r") as fh:
        for line in fh:
            chrom, size = line.strip().split("\t")
            chrom_lengths.append((chrom, int(size)))
    chrom_lengths = list(sorted(chrom_lengths, key=lambda x: x[0]))
    return chrom_lengths


def create_bam_index(bam):
    """Create bam index.

    Parameters
    ----------
    bam : str
          Path to bam file
    """
    if isinstance(bam, pysam.AlignmentFile):
        bam = bam.filename
    if not os.path.exists("{}.bai".format(bam)):
        pysam.index(bam)


def is_read_uniq_mapping(read):
    """Check if read is uniquely mappable.

    Parameters
    ----------
    read : pysam.Alignment.fetch object

    Most reliable: ['NH'] tag
    """
    # Filter out secondary alignments
    if read.is_secondary:
        return False
    tags = dict(read.get_tags())
    try:
        nh_count = tags["NH"]
    except KeyError:
        # Reliable in case of STAR
        if read.mapping_quality == 255:
            return True
        if read.mapping_quality < 1:
            return False
        # NH tag not set so rely on flags
        if read.flag in __SAM_NOT_UNIQ_FLAGS__:
            return False
        else:
            raise RuntimeError("Malformed BAM?")
    if nh_count == 1:
        return True
    return False


def find_first_non_none(positions):
    """Given a list of positions, find the index and value of first non-none element.

    This method is specifically designed for pysam, which has a weird way of returning
    the reference positions. If they are mismatched/softmasked it returns None
    when fetched using get_reference_positions.

    query_alignment_start and query_alignment_end give you indexes of positions in the read
    which technically align but are not softmasked, i.e. a position is set to None even if it does not align.

    Parameters
    ----------
    positions: list of int
               Positions as returned by pysam.fetch.get_reference_positions

    Return
    ------
    index: int
           Index of first non-None value
    position: int
              Value at that index
    """
    for idx, position in enumerate(positions):
        if position is not None:
            return idx, position


def find_last_non_none(positions):
    """Given a list of positions, find the index and value of last non-none element.

    This function is similar to the `find_first_non_none` function, but does it for the reversed list.
It is specifically useful for reverse strand cases Parameters ---------- positions: list of int Positions as returned by pysam.fetch.get_reference_positions Return ------ index: int Index of first non-None value position: int Value at that index """ return find_first_non_none(positions[::-1]) # NOTE: We can in principle do a longer metagene anaylsis # using this helper funciont def yield_intervals(chrom_size, chunk_size=20000): for start in np.arange(0, chrom_size, chunk_size): end = start + chunk_size if end > chrom_size: yield (start, chrom_size) else: yield (start, end) def bwsum(bw, chunk_size=5000, scale_to=1e6): bw_sum = 0 if isinstance(bw, six.string_types): bw = pyBigWig.open(bw) chrom_sizes = bw.chroms() for chrom, chrom_size in six.iteritems(chrom_sizes): for start, end in yield_intervals(chrom_size, chunk_size): bw_sum += np.nansum(bw.values(chrom, start, end)) scale_factor = 1 / (bw_sum / scale_to) return bw_sum, scale_factor def scale_bigwig(inbigwig, chrom_sizes, outbigwig, scale_factor=1): """Scale a bigwig by certain factor. Parameters ---------- inbigwig: string Path to input bigwig chrom_sizes: string Path to chrom.sizes file outbigwig: string Path to output bigwig scale_factor: float Scale by value """ wigfile = os.path.abspath("{}.wig".format(outbigwig)) chrom_sizes = os.path.abspath(chrom_sizes) inbigwig = os.path.abspath(inbigwig) outbigwig = os.path.abspath(outbigwig) if os.path.isfile(wigfile): # wiggletools errors if the file already exists os.remove(wigfile) cmds = ["wiggletools", "write", wigfile, "scale", str(scale_factor), inbigwig] try: p = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, ) stdout, stderr = p.communicate() rc = p.returncode if rc != 0: raise RuntimeError( "Error running wiggletools.\nstdout : {} \n stderr : {}".format( stdout, stderr ) ) except FileNotFoundError: raise FileNotFoundError( "wiggletool not found on the path." "Use `conda install wiggletools`" ) cmds = ["wigToBigWig", wigfile, chrom_sizes, outbigwig] try: p = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, ) stdout, stderr = p.communicate() rc = p.returncode if rc != 0: raise RuntimeError( "Error running wigToBigWig.\nstdout : {} \n stderr : {}".format( stdout, stderr ) ) os.remove(wigfile) except FileNotFoundError: raise FileNotFoundError( "wigToBigwig not found on the path. This is an external " "tool from UCSC which can be downloaded from " "http://hgdownload.soe.ucsc.edu/admin/exe/. Alternatatively, use " "`conda install ucsc-wigtobigwig`" ) def get_region_sizes(region_bed): """Get summed up size of a CDS/UTR region from bed file Parameters ---------- region_bed: string Input bed file Returns ------- region_sizes: pd.Series Series with region name as index and size as key """ if isinstance(region_bed, six.string_types): region_bed = pybedtools.BedTool(region_bed).to_dataframe() region_bed_grouped = region_bed.groupby("name") region_sizes = {} for gene_name, gene_group in region_bed_grouped: ## Get rid of trailing dots gene_name = re.sub(r"\.[0-9]+", "", gene_name) # Collect all intervals at once intervals = list( zip( gene_group["chrom"], gene_group["start"], gene_group["end"], gene_group["strand"], ) ) for interval in intervals: if gene_name not in region_sizes: # End is always 1-based so does not require +1 region_sizes[gene_name] = interval[2] - interval[1] else: region_sizes[gene_name] += interval[2] - interval[1] return
pd.Series(region_sizes)
pandas.Series
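# A minimal standalone sketch of the running mean/variance update that
# summary_stats_two_arrays_welch above applies per position, i.e.
# mean_n = mean_{n-1} + delta / n and
# (n-1)*S_n^2 = (n-2)*S_{n-1}^2 + (x_n - mean_{n-1}) * (x_n - mean_n).
# The helper name `welford_update` is hypothetical and used only for illustration.
import numpy as np


def welford_update(n, mean, sum_sq, x):
    """Return updated (n, mean, sum_sq) after observing x."""
    n += 1
    delta = x - mean               # x_n - mean_{n-1}
    mean += delta / n              # mean_n
    sum_sq += delta * (x - mean)   # accumulates (n-1) * S_n^2
    return n, mean, sum_sq


n, mean, sum_sq = 0, 0.0, 0.0
data = [1.0, 2.0, 4.0, 7.0]
for x in data:
    n, mean, sum_sq = welford_update(n, mean, sum_sq, x)
assert np.isclose(mean, np.mean(data))
assert np.isclose(sum_sq / (n - 1), np.var(data, ddof=1))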
""" We want to simplify the operations for pandas dataframes assuming we are using timeseries as the main objects. When we have multiple timeseries, we will: 1) calculate joint index using df_index() 2) reindex each timeseries to the joint index We then need to worry about multiple columns if there are. If none, each timeseries will be considered as pd.Series If there are multiple columns, we will perform the calculations columns by columns. """ from pyg_base._types import is_df, is_str, is_num, is_tss, is_int, is_arr, is_ts, is_arrs, is_tuples, is_pd from pyg_base._dictable import dictable from pyg_base._as_list import as_list from pyg_base._zip import zipper from pyg_base._reducer import reducing, reducer from pyg_base._decorators import wrapper from pyg_base._loop import loop from pyg_base._dates import dt import pandas as pd import numpy as np from copy import copy import inspect import datetime from operator import add, mul __all__ = ['df_fillna', 'df_index', 'df_reindex', 'df_columns', 'presync', 'np_reindex', 'nona', 'df_slice', 'df_unslice', 'min_', 'max_', 'add_', 'mul_', 'sub_', 'div_', 'pow_'] def _list(values): """ >>> assert _list([1,2,[3,4,5,[6,7]],dict(a =[8,9], b=[10,[11,12]])]) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] >>> assert _list(1) == [1] >>> assert _list(dict(a=1, b=2)) == [1,2] """ if isinstance(values, list): return sum([_list(df) for df in values], []) elif isinstance(values, dict): return _list(list(values.values())) else: return [values] @loop(list, tuple, dict) def _index(ts): if isinstance(ts, pd.Index): return ts elif is_pd(ts): return ts.index elif is_arr(ts): return len(ts) else: raise ValueError('did not provide an index') def _df_index(indexes, index): if len(indexes) > 0: if is_str(index): if index[0].lower() == 'i':#nner return reducing('intersection')(indexes) elif index[0].lower() == 'o':#uter return reducing('union')(indexes) elif index[0].lower() == 'l':#uter return indexes[0] elif index[0].lower() == 'r':#uter return indexes[-1] else: return _index(index) else: return None def _np_index(indexes, index): if len(indexes) > 0: if index[0].lower() == 'i':#nner return min(indexes) elif index[0].lower() == 'o':#uter return max(indexes) elif index[0].lower() == 'l':#uter return indexes[0] elif index[0].lower() == 'r':#uter return indexes[-1] else: return None def df_index(seq, index = 'inner'): """ Determines a joint index of multiple timeseries objects. :Parameters: ---------------- seq : sequence whose index needs to be determined a (possible nested) sequence of timeseries/non-timeseries object within lists/dicts index : str, optional method to determine the index. The default is 'inner'. :Returns: ------- pd.Index The joint index. 
:Example: --------- >>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)] >>> more_tss_as_dict = dict(zip('abcde',[pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)])) >>> res = df_index(tss + [more_tss_as_dict], 'inner') >>> assert len(res) == 6 >>> res = df_index(more_tss_as_dict, 'outer') >>> assert len(res) == 14 """ listed = _list(seq) indexes = [ts.index for ts in listed if is_pd(ts)] if len(indexes): return _df_index(indexes, index) arrs = [len(ts) for ts in listed if is_arr(ts)] if len(arrs): return _np_index(arrs, index) else: return None def df_columns(seq, index = 'inner'): """ returns the columns of the joint object :Example: --------- >>> a = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('abcde')) >>> b = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('bcdef')) >>> assert list(df_columns([a,b])) == list('bcde') >>> assert list(df_columns([a,b], 'oj')) == list('abcdef') >>> assert list(df_columns([a,b], 'lj')) == list('abcde') >>> assert list(df_columns([a,b], 'rj')) == list('bcdef') :Parameters: ---------- seq : sequence of dataframes DESCRIPTION. index : str, optional how to inner-join. The default is 'inner'. :Returns: ------- pd.Index list of columns. """ listed = _list(seq) indexes= [ts.columns for ts in listed if is_df(ts) and ts.shape[1]>1 and len(set(ts.columns)) == ts.shape[1]] #dataframe with non-unique columns are treated like arrays if len(indexes): return _df_index(indexes, index) arrs = [ts.shape[1] for ts in listed if (is_arr(ts) or is_df(ts)) and len(ts.shape)>1 and ts.shape[1]>1] if len(arrs): return _np_index(arrs, index) return None @loop(list, tuple, dict) def _df_fillna(df, method = None, axis = 0, limit = None): methods = as_list(method) if len(methods) == 0: return df if is_arr(df): return df_fillna(pd.DataFrame(df) if len(df.shape)==2 else
pd.Series(df)
pandas.Series
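# A small standalone sketch of what df_index above computes for 'inner' vs
# 'outer': reducing('intersection'/'union') over the individual indexes is
# equivalent to chaining pandas Index.intersection / Index.union, so plain
# pandas is used here and pyg_base itself is not imported.
import numpy as np
import pandas as pd

a = pd.Series(np.arange(4), index=pd.date_range("2021-01-01", periods=4))
b = pd.Series(np.arange(4), index=pd.date_range("2021-01-03", periods=4))

inner = a.index.intersection(b.index)  # what index='inner' would yield
outer = a.index.union(b.index)         # what index='outer' would yield

assert len(inner) == 2  # 2021-01-03 and 2021-01-04
assert len(outer) == 6  # 2021-01-01 through 2021-01-06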
import argparse import math import json from tqdm import tqdm from nltk.tag import pos_tag import pandas as pd import networkx as nx import torch import config def get_relevant_tokens(word_count_path, threshold): d = pd.read_csv(word_count_path, sep='\t', header=None, quotechar=None, quoting=3) d.columns = ['token', 'count'] d = d.loc[d['count'] > threshold] return d.token.tolist() def prune_dt(input_dt_edges_path, relevant_tokens, output_dt_edges_path): d =
pd.read_csv(input_dt_edges_path, sep='\t', header=None, quotechar=None, quoting=3)
pandas.read_csv
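# A small sketch (made-up data, not from the source) of the thresholding that
# get_relevant_tokens above performs: read a token/count TSV and keep only
# tokens whose count exceeds the given threshold.
import io

import pandas as pd

word_count_tsv = "the\t100\ncat\t5\nzyzzyva\t1\n"
d = pd.read_csv(io.StringIO(word_count_tsv), sep="\t", header=None)
d.columns = ["token", "count"]
relevant = d.loc[d["count"] > 3].token.tolist()
assert relevant == ["the", "cat"]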
# Module deals with creation of ligand and receptor scores, and creation of scConnect tables etc. import scConnect as cn import scanpy as sc version = cn.database.version organism = cn.database.organism # Scoring logic for ligands def ligandScore(ligand, genes): """calculate ligand score for given ligand and gene set""" from scipy.stats.mstats import gmean import numpy as np if ligand.ligand_type == "peptide" and isinstance(ligand.preprogene, str): # check if multiple genes needs to be accounted for if isinstance(eval(ligand.preprogene), list): ligand_genes = list() for gene in eval(ligand.preprogene): try: ligand_genes.append(genes[gene]) except KeyError: #print(f"{gene} not found") ligand_genes.append(0.0) # use max, as there might be many orthologs genes for one original # gene and not all have to be expressed try: ligand_score = max(ligand_genes) except ValueError: print(f"something is wrong with the list {ligand_genes}") ligand_score = 0.0 return ligand_score elif ligand.ligand_type == "molecule": synthesis = ligand.synthesis transport = ligand.transport reuptake = ligand.reuptake excluded = ligand.excluded # get geometric mean of synthesis genes (all need to be present) if not isinstance(synthesis, str): # If no genes are needed, synthesis is set to nan synthesis = np.nan else: synthesis_expression = list() for gene in eval(synthesis): try: synthesis_expression.append(genes[gene]) except KeyError: # If gene was not found append 0 #print(f"{gene} not found") synthesis_expression.append(0.0) synthesis = gmean(synthesis_expression) # get maximum of vesicle transporters (only one is needed for molecule transport) if not isinstance(transport, str): # If no specific genes are needed, set transport to nan transport = np.nan else: transport_expression = list() for gene in eval(transport): try: transport_expression.append(genes[gene]) except KeyError: # If gene was not found append 0 #print(f"{gene} not found") transport_expression.append(0.0) transport = max(transport_expression) # Get maximum of reuptake genes (only one is needed) if not isinstance(reuptake, str): # If no specific genes are needed, set reuptake to nan reuptake = np.nan else: reuptake_expression = list() for gene in eval(reuptake): try: reuptake_expression.append(genes[gene]) except KeyError: # If gene was not found append 0 #print(f"{gene} not found") reuptake_expression.append(0.0) reuptake = max(reuptake_expression) # get maximum among exluding genes where any gene expression divert to other ligands if not isinstance(excluded, str): # If no specific genes are needed, set excluded to 0 excluded = 0 else: excluded_expression = list() for gene in eval(excluded): try: excluded_expression.append(genes[gene]) except KeyError: # If gene was not found append 0 #print(f"{gene} not found") excluded_expression.append(0.0) excluded = max(excluded_expression) # return geometric mean of synthesis, transport and reuptake multipled exclusion promoting_factor = gmean(([x for x in [synthesis, transport, reuptake] if str(x) != "nan"])) # genes driving ligand production, remove nan values if str(promoting_factor) == "nan": # capture cases where no promoting genes were present print(f"no promoting genes detected for {ligand.ligand}") return 0.0 # exit before running exclusion calculation ligand_score = promoting_factor - excluded # correct ligand expression based on the exclusion factor if ligand_score < 0: # ligand score should be 0 or positive ligand_score = 0.0 return ligand_score # If genes are missing from ligand gene list else: 
print("Big error! ligand type is not defined!") return 0.0 def ligands(adata, organism=organism, select_ligands=None): """return a dataframe with ligand scores for each cluster. .. note:: Needs a gene call dataframe under adata.uns.gene_call. Use scConnect.genecall to create such dataframe organism defaults to mouse, to use genes for other organism select this here. use select_ligands to only asses given ligands (used by optimize_segregation to only check for gaba and glutamate) Returns: Dict of ligand call for each cluster. """ import scConnect as cn import pkg_resources import pandas as pd ligands = pd.read_csv(pkg_resources.resource_filename( __name__, (f"data/Gene_annotation/{version}/{organism}/ligands.csv"))) if isinstance(select_ligands, list): select = [True if ligand in select_ligands else False for ligand in ligands.ligand] ligands = ligands[select] ligand_df = pd.DataFrame(index=ligands.ligand) for cluster, genes in adata.uns["gene_call"].items(): cluster_scores = list() for ligand_data in ligands.iterrows(): ligand = ligand_data[1] # fetch ligand score for specific ligand and gene set ligand_score = ligandScore(ligand, genes) cluster_scores.append(ligand_score) ligand_df[cluster] = cluster_scores adata.uns["ligands"] = ligand_df.to_dict() return adata # Scoring logic for receptors def receptorScore(receptor, genes): """calculate receptor score given receptor and gene set""" from scipy.stats.mstats import gmean gene_expression = list() for gene in eval(receptor.gene): try: gene_expression.append(genes[gene]) except KeyError: # If gene was not found append 0 #print(f"{gene} not found") gene_expression.append(0.0) # use max, as several genes might be found during ortholog search, # not all might bee needed to create the receptor gene_expression = max(gene_expression) return gene_expression def receptors(adata, organism=organism): """return a dataframe with receptor scores for each cluster. .. note:: Needs a gene call dataframe under adata.uns.gene_call. Use scConnect.genecall to create such dataframe. Returns: Dict of receptor call for each cluster. """ import scConnect as cn import pkg_resources import pandas as pd receptors = pd.read_csv(pkg_resources.resource_filename( __name__, (f"data/Gene_annotation/{version}/{organism}/receptors.csv"))) receptor_df = pd.DataFrame(index=receptors.receptor) for cluster, genes in adata.uns["gene_call"].items(): cluster_scores = list() for receptor_data in receptors.iterrows(): receptor = receptor_data[1] # fetch ligand score for specific ligand and gene set receptor_score = receptorScore(receptor, genes) cluster_scores.append(receptor_score) receptor_df[cluster] = cluster_scores adata.uns["receptors"] = receptor_df.to_dict() return adata # Interaction logic def interactions(emitter, target, self_reference=True, organism=organism, corr_pval=True): """return an edge list of interactions between clusters. If all connections are of interest, use the same data source for emitter and target. .. note:: self_reference is only valid when emitter == target. .. note:: edge_list is returned as a list, and not in a adata object. This is since multiple adata objects can be passed in to the function, and whould lead to ambiguity of which object to append the edge_list to. Returns: List of edges between given emmitor and target clusters. 
""" import pkg_resources import pandas as pd from itertools import product from scConnect.tools import printProgressBar interactions = pd.read_csv(pkg_resources.resource_filename( __name__, (f"data/Gene_annotation/{version}/interactions.csv")), index_col=[0, 1], sep=";") interactions.sort_index(axis="index", inplace=True) # Create a set of all possible index combinations. # This is used to test if ligand receptor combination is present. interaction_set = set(interactions.index) # An edge list should contain u, v and d, # where u is input node, v is output node # and d is a dictionary with edge attributes. edge_list = list() # get all clusters # NOTE: if the same cluster name is used in emitter and target datasets, they are # assumed to be the same cluster. Give your clusters uniqe names between your datasets. try: emitter_clusters = pd.DataFrame(emitter.uns["ligands"]).columns target_clusters = pd.DataFrame(target.uns["ligands"]).columns except KeyError: print( f"Please run connect.ligands() and connect.receptors() on your datasets first") return # Calculate total number of cluster combinations for the progress bar if self_reference is True: total_comb = len(list(product(emitter_clusters, target_clusters))) else: total_comb = len([(e, t) for (e, t) in product( emitter_clusters, target_clusters) if e != t]) ligands = pd.DataFrame(emitter.uns["ligands"]) receptors = pd.DataFrame(target.uns["receptors"]) # load extra ligand and receptor statistics ligands_zscore = pd.DataFrame(emitter.uns["ligands_zscore"]) receptors_zscore = pd.DataFrame(target.uns["receptors_zscore"]) if corr_pval: ligands_pval = pd.DataFrame(emitter.uns["ligands_corr_pval"]) receptors_pval = pd.DataFrame(target.uns["receptors_corr_pval"]) else: ligands_pval = pd.DataFrame(emitter.uns["ligands_pval"]) receptors_pval = pd.DataFrame(target.uns["receptors_pval"]) # Fetch receptor and ligand information receptor_info = pd.read_csv(pkg_resources.resource_filename( __name__, (f"data/Gene_annotation/{version}/{organism}/receptors.csv")), index_col=1) receptor_info = receptor_info[["family", "gene"]] ligand_info = pd.read_csv(pkg_resources.resource_filename( __name__, (f"data/Gene_annotation/{version}/{organism}/ligands.csv")), index_col=1) ligand_info = ligand_info[["ligand_type", "comment"]] # Nested for-loop to get all combinations of # interactions between clusters. comb_tried = 0 for emitter_cluster in emitter_clusters: for target_cluster in target_clusters: # Are we interested in self referencing information? 
# I leave that up to the user if emitter_cluster != target_cluster or self_reference == True: # Get only ligands and receptors expressed by the clusters # (speeds up itterative functions later) emitter_ligands = ligands[emitter_cluster][ligands[emitter_cluster] > 0] target_receptors = receptors[target_cluster][receptors[target_cluster] > 0] connections = get_connections( emitter_ligands, target_receptors, interactions, interaction_set, receptor_info, ligand_info, emitter_cluster, target_cluster, ligands_zscore, ligands_pval, receptors_zscore, receptors_pval) if len(connections) > 0: for connection in connections: edge_list.append(connection) # Add the progress bar comb_tried += 1 printProgressBar( comb_tried, total_comb, prefix=f"finding connections between {len(emitter_clusters)} emitter clusters and {len(target_clusters)} target clusters") return edge_list # get all connections based on Ligands and receptors, and provide score for interactions # Also provide meta data as a dictionary for interaction def scale(value, from_range=(0, 1), to_range=(10E-100, 1)): # mitagate log with 0 value = to_range[0] + (to_range[1] - to_range[0]) * (value -from_range[0]) / (to_range[1] - to_range[0]) return value def get_connections( ligands, receptors, interactions, interaction_set, receptor_info, ligand_info, emitter_cluster, target_cluster, ligands_zscore, ligands_pval, receptors_zscore, receptors_pval): """finds connections between ligands and receptors and return a score and metadata for each interaction""" from scipy.stats.mstats import gmean import numpy as np # shorten the list of interactions to only contain relevant ligands. # This should speed up the algorithm ligand_filter = [True if ligand in ligands.keys() else False for ligand in interactions.index.get_level_values(0)] interactions = interactions.loc[ligand_filter] def interaction_specificity(l, r): # used to calculate interaction specificity score sig = -np.log10((l+r)/2) return sig connections = list() for ligand, l_score in ligands.iteritems(): for receptor, r_score in receptors.iteritems(): if (ligand, receptor) in interaction_set: interaction = interactions.loc[ligand, receptor] score = float(gmean((l_score, r_score))) ligand_pval = float(ligands_pval[emitter_cluster][ligand]) receptor_pval = float(receptors_pval[target_cluster][receptor]) specificity = float(interaction_specificity(ligand_pval, receptor_pval)) log_score = float(np.log10(score + 1)) importance = specificity * log_score connections.append((ligands.name, receptors.name, { "score": float(score), "log_score": log_score, # From here on, all values are +1ed and logaritmized with base of 10. # From here on, all values are +1ed and logaritmized with base of 10. 
"ligand": str(ligand), "ligand_zscore": float(ligands_zscore[emitter_cluster][ligand]), "ligand_pval": ligand_pval, "receptor": str(receptor), "receptor_zscore": float(receptors_zscore[target_cluster][receptor]), "receptor_pval": receptor_pval, "interaction": f"{ligand} --> {receptor}", "specificity": specificity, "importance": importance, "endogenous": f"{list(interaction.endogenous)}", "action": f"{list(interaction.action)}", "ligandspecies": f"{list(interaction.ligand_species)}", "receptorspecies": f"{list(interaction.target_species)}", "pubmedid": f"{list(interaction.pubmed_id)[:5]}", "receptorfamily": str(receptor_info.loc[receptor]["family"]), "receptorgene": str(receptor_info.loc[receptor]["gene"]), "ligandtype": str(ligand_info.loc[ligand]["ligand_type"]), "ligandcomment": str(ligand_info.loc[ligand]["comment"])})) return connections def nodes(adatas): """ Returns an list of nodes, attributes dictionary tuples. Each tuple represent one node with an attribute dictionary: *(cluster, dict(receptors: dict(receptor:score), ligands: dict(ligand:score) ))* """ if not isinstance(adatas, list): adatas = [adatas, ] nodes = [] for i, adata in enumerate(adatas): print(f"precessing adata #{i+1}") ligands_score = adata.uns["ligands"] ligands_zscore = adata.uns["ligands_zscore"] ligands_pval = adata.uns["ligands_pval"] ligands_corr_pval = adata.uns["ligands_corr_pval"] receptors_score = adata.uns["receptors"] receptors_zscore = adata.uns["receptors_zscore"] receptors_pval = adata.uns["receptors_pval"] receptors_corr_pval = adata.uns["receptors_corr_pval"] genes = adata.uns["gene_call"] clusters = ligands_score.keys() # Filter out ligands with positive score (remove non expressing ligands and receptors) for cluster in clusters: print(f"processing cluster {cluster}") cluster_ligands_score = {k: v for k, v in ligands_score[cluster].items() if v > 0} cluster_receptors_score = {k: v for k, v in receptors_score[cluster].items() if v > 0} # Add all information to the node dicionary node = (cluster, { "ligands_score": cluster_ligands_score, "ligands_zscore": ligands_zscore[cluster], "ligands_pval": ligands_pval[cluster], "ligands_corr_pval": ligands_corr_pval[cluster], "receptors_score": cluster_receptors_score, "receptors_zscore": receptors_zscore[cluster], "receptors_pval": receptors_pval[cluster], "receptors_corr_pval": receptors_corr_pval[cluster], "genes": genes[cluster]}) nodes.append(node) return nodes # Statistic inference of ligand and receptor scores # Here we shuffel the group annotations many times, calculate ligand and receptor scores # and find the mean and standard deviation for each ligand/receptor score for each gorup. # We can then calculate the z-score of the true ligand/receptor score, p-values and corrected p-values # Data an be used to detect group specific expression of ligands and receptors. 
def _ligand_receptor_call(adata, groupby, organism, transformation, return_df = True): import pandas as pd adata = cn.genecall.meanExpression(adata, groupby=groupby, normalization=False, use_raw=False, transformation=transformation) adata = cn.connect.ligands(adata, organism=organism) adata = cn.connect.receptors(adata, organism=organism) ligands = pd.DataFrame(adata.uns["ligands"]) receptors = pd.DataFrame(adata.uns["receptors"]) if return_df: return ligands, receptors def _values_df(dfs): values_df = dfs[0].copy() for i in range(values_df.shape[0]): for j in range(values_df.shape[1]): values = list() for df in range(len(dfs)): values.append(dfs[df].iloc[i,j]) values_df.iloc[i,j] = str(values) return values_df def _mean_df(df): import numpy as np mean_df = df.copy() for i in range(df.shape[0]): for j in range(df.shape[1]): mean_df.iloc[i,j] = np.mean(eval(df.iloc[i,j])) return mean_df def _std_df(df): import numpy as np std_df = df.copy() for i in range(df.shape[0]): for j in range(df.shape[1]): std_df.iloc[i,j] = np.std(eval(df.iloc[i,j])) return std_df def _score_pv_df(mean, std, value, emperical, values, merge_dist): """Calculate z-scores and p-values for ligand and receptor calls compared to random group designation returns: score_df and pval_df""" import numpy as np from scipy import stats assert (mean.shape == std.shape == value.shape), "dataframes are not of the same size, rerun ligand and receptor call" score_df = mean.copy() pval_df = mean.copy() warning = False # warning flag for if mean or std is 0 (mening no values were ever sampled to that group) faults = 0 for i in range(score_df.shape[0]): # for each ligand and receptor if merge_dist == True: dist = list() for j in range(score_df.shape[1]): for val in eval(values.iloc[i,j]): dist.append(val) for j in range(score_df.shape[1]): # for each celltype v = value.iloc[i,j] s = std.iloc[i,j] m = mean.iloc[i,j] if merge_dist == False: dist = eval(values.iloc[i,j]) if s == 0: # sampeling never managed to include this ligand or receptor for this group z_score = 0.0 pval = 1 warning = True faults += 1 else: z_score = (v-m)/s #pval = float(stats.norm.sf(abs(z_score))*2) # Two tailed p-value pval = float(stats.norm.sf(z_score)) # one tailed p-value if emperical == True: # Calculate exact permutation pvalues emperically from the collected distribution # method from https://www.degruyter.com/view/journals/sagmb/9/1/article-sagmb.2010.9.1.1585.xml.xml # permutation without replacement (use full sequence) b = sum(dist > v) m = len(dist) pval = (b+1)/(m+1) score_df.iloc[i,j] = z_score pval_df.iloc[i,j] = pval if warning: total = score_df.shape[0] * score_df.shape[1] print(f"{faults/total*100} % of group metrices were 0. increase n to reduce this number") return score_df, pval_df def _corrected_pvalue(pvalues, method="fdr_bh", scale_pval=False): """correct a dataframe of p-values to a dataframe of corrected p-values. 
Supports many different methods: bonferroni : one-step correction sidak : one-step correctio holm-sidak : step down method using Sidak adjustment holm : step-down method using Bonferroni adjustment simes-hochberg : step-up method (independent hommel : closed method based on Simes tests (non-negative) fdr_bh : Benjamini/Hochberg (non-negative) fdr_by : Benjamini/Yekutieli (negative) fdr_tsbh : two stage fdr correction (non-negative) fdr_tsbky : two stage fdr correction (non-negative) defaults to fdr_bh returns a pandas dataframe """ import statsmodels.stats.multitest as mt import pandas as pd p_flat = pvalues.to_numpy().flatten() corr_p = mt.multipletests(p_flat, method=method)[1] corr_p = corr_p.reshape(pvalues.shape) corr_pval = pd.DataFrame(corr_p, columns=pvalues.columns, index=pvalues.index) # scale p values to remove abloslute 0 calls if scale_pval: corr_pval = scale(corr_pval) return corr_pval def specificity(adata, n, groupby, organism=organism, return_values=False, transformation="log1p", emperical=True, merge_dist=False): """calculate statistics for the ligands and receptor scores. Compare the group ligand and receptor scores to the mean score of that group after n number of permutations if emperical is True (default), calculates p-values emperically given the collected random distribution. p = (b+1)/(m+1) where b is the number of permutated values higher than the observed and m is the number of permutations used (set this by the argument n)""" from random import shuffle import pandas as pd from scConnect.tools import printProgressBar _adata = adata.copy() groups = list(_adata.obs[groupby]) ligand_dfs = list() receptor_dfs = list() # variable to store the setting in, can be used when saving the specificity settings = dict( groupby = groupby, permutations = n, transformation = transformation, emperical = emperical, merge_dist = merge_dist ) # Run normal ligand and receptor call without shuffel on original adata _ligand_receptor_call(adata, groupby=groupby, organism=organism, transformation=transformation, return_df=False) # shuffel group annotations n times and fetch ligand and receptor dataframes for i in range(n): printProgressBar(i+1, n, prefix=f"Shuffeling dataframe {i+1} out of {n}") shuffle(groups) _adata.obs[groupby] = groups ligand, receptor = _ligand_receptor_call(_adata, groupby=groupby, organism=organism, transformation=transformation) ligand_dfs.append(ligand) receptor_dfs.append(receptor) # Merge all dataframes to one datafram (with list of values for each element) ligand_values = _values_df(ligand_dfs) receptor_values = _values_df(receptor_dfs) # Calculate the mean values of the list in each element print("Calculating means...") ligand_mean = _mean_df(ligand_values) receptor_mean = _mean_df(receptor_values) # Calculate the standard deviation of the list in each element print("Calculating standard deviations...") ligand_std = _std_df(ligand_values) receptor_std = _std_df(receptor_values) # Calculate Z-scores, p-values and corrected p-values print("Calculating Z-score, p-values and corrected p-values...") ligand_value = pd.DataFrame(adata.uns["ligands"]) ligand_score , ligand_pval = _score_pv_df(ligand_mean, ligand_std, ligand_value, emperical, ligand_values, merge_dist=merge_dist) ligand_corr_pval = _corrected_pvalue(ligand_pval, scale_pval=not emperical) receptor_value = pd.DataFrame(adata.uns["receptors"]) receptor_score , receptor_pval = _score_pv_df(receptor_mean, receptor_std, receptor_value, emperical, receptor_values, merge_dist=merge_dist) receptor_corr_pval = 
_corrected_pvalue(receptor_pval, scale_pval=not emperical) adata.uns.update({"ligands_zscore": ligand_score.to_dict()}) adata.uns.update({"receptors_zscore": receptor_score.to_dict()}) adata.uns.update({"ligands_pval": ligand_pval.to_dict()}) adata.uns.update({"receptors_pval": receptor_pval.to_dict()}) adata.uns.update({"ligands_corr_pval": ligand_corr_pval.to_dict()}) adata.uns.update({"receptors_corr_pval": receptor_corr_pval.to_dict()}) adata.uns.update({"specificity_setting": settings}) if return_values: return adata, ligand_values, receptor_values return adata # Save and load specificity calculations (time consuming) def save_specificity(adata, filename): """Saves data calculated by cn.connect.specificity to an excel file. This file can later be loaded using cn.connect.load_specificity""" import pandas as pd keys = [ 'ligands', 'receptors', 'ligands_zscore', 'receptors_zscore', 'ligands_pval', 'receptors_pval', 'ligands_pval', 'receptors_pval', 'ligands_corr_pval', 'receptors_corr_pval'] xls = pd.ExcelWriter(filename) for key in keys: table = pd.DataFrame(adata.uns[key]) table.to_excel(xls, sheet_name=key) s = pd.Series(adata.uns["specificity_setting"]) s.to_excel(xls, sheet_name="specificity_setting") xls.close() def load_specificity(adata, filename): """Loads previously calculated specificity to an andata object""" import pandas as pd keys = [ 'ligands', 'receptors', 'ligands_zscore', 'receptors_zscore', 'ligands_pval', 'receptors_pval', 'ligands_pval', 'receptors_pval', 'ligands_corr_pval', 'receptors_corr_pval'] for key in keys: data =
pd.read_excel(filename, sheet_name=key, index_col=0)
pandas.read_excel
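# A minimal sketch of the empirical permutation p-value described in
# specificity() above: p = (b + 1) / (m + 1), where b is the number of
# permuted scores exceeding the observed score and m is the number of
# permutations. The scores below are simulated for illustration only.
import numpy as np

rng = np.random.default_rng(0)
permuted_scores = rng.normal(size=1000)  # stand-in null distribution
observed_score = 2.5

b = np.sum(permuted_scores > observed_score)
m = len(permuted_scores)
p_value = (b + 1) / (m + 1)
assert 0 < p_value <= 1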
import pickle import numpy as np import pandas as pd from datarobot_drum.drum.common import ( PythonArtifacts, REGRESSION_PRED_COLUMN, extra_deps, SupportedFrameworks, TargetType, ) from datarobot_drum.drum.exceptions import DrumCommonException from datarobot_drum.drum.artifact_predictors.artifact_predictor import ArtifactPredictor class XGBoostPredictor(ArtifactPredictor): """ This Predictor supports both XGBoost native & sklearn api wrapper as well """ def __init__(self): super(XGBoostPredictor, self).__init__( SupportedFrameworks.XGBOOST, PythonArtifacts.PKL_EXTENSION ) def is_framework_present(self): try: import xgboost return True except ImportError as e: self._logger.debug("Got error in imports: {}".format(e)) return False def framework_requirements(self): return extra_deps[SupportedFrameworks.XGBOOST] def can_load_artifact(self, artifact_path): if self.is_artifact_supported(artifact_path) and self.is_framework_present(): return True return False def can_use_model(self, model): if not self.is_framework_present(): return False try: from sklearn.pipeline import Pipeline import xgboost if isinstance(model, Pipeline): # check the final estimator in the pipeline is XGBoost if isinstance( model[-1], (xgboost.sklearn.XGBClassifier, xgboost.sklearn.XGBRegressor) ): return True elif isinstance(model, xgboost.core.Booster): return True return False except Exception as e: self._logger.debug("Exception: {}".format(e)) return False def load_model_from_artifact(self, artifact_path): with open(artifact_path, "rb") as picklefile: try: model = pickle.load(picklefile, encoding="latin1") except TypeError: model = pickle.load(picklefile) return model def predict(self, data, model, **kwargs): # checking if positive/negative class labels were provided # done in the base class super(XGBoostPredictor, self).predict(data, model, **kwargs) import xgboost xgboost_native = False if isinstance(model, xgboost.core.Booster): xgboost_native = True data = xgboost.DMatrix(data) if self.target_type.value in TargetType.CLASSIFICATION.value: if xgboost_native: predictions = model.predict(data) if self.target_type == TargetType.BINARY or len(self.class_labels) == 2: negative_preds = 1 - predictions predictions = np.concatenate( (negative_preds.reshape(-1, 1), predictions.reshape(-1, 1)), axis=1 ) else: if predictions.shape[1] != len(self.class_labels): raise DrumCommonException( "Target type '{}' predictions must return the " "probability distribution for all class labels".format(self.target_type) ) else: predictions = model.predict_proba(data) predictions =
pd.DataFrame(predictions, columns=self.class_labels)
pandas.DataFrame
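# A sketch of the binary-case expansion used above for native xgboost
# Boosters: model.predict() returns only the positive-class probability, so
# the negative-class column is derived as 1 - p before the DataFrame is
# built. The probabilities and class labels here are assumed examples.
import numpy as np
import pandas as pd

positive_probs = np.array([0.1, 0.8, 0.4])
negative_probs = 1 - positive_probs
predictions = np.concatenate(
    (negative_probs.reshape(-1, 1), positive_probs.reshape(-1, 1)), axis=1
)
pred_df = pd.DataFrame(predictions, columns=["no", "yes"])
assert np.allclose(pred_df.sum(axis=1), 1.0)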
import numpy as np import pytest import pandas as pd from pandas import DataFrame, Index, Series, date_range, offsets import pandas._testing as tm class TestDataFrameShift: def test_shift(self, datetime_frame, int_frame): # naive shift shiftedFrame = datetime_frame.shift(5) tm.assert_index_equal(shiftedFrame.index, datetime_frame.index) shiftedSeries = datetime_frame["A"].shift(5) tm.assert_series_equal(shiftedFrame["A"], shiftedSeries) shiftedFrame = datetime_frame.shift(-5) tm.assert_index_equal(shiftedFrame.index, datetime_frame.index) shiftedSeries = datetime_frame["A"].shift(-5) tm.assert_series_equal(shiftedFrame["A"], shiftedSeries) # shift by 0 unshifted = datetime_frame.shift(0) tm.assert_frame_equal(unshifted, datetime_frame) # shift by DateOffset shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay()) assert len(shiftedFrame) == len(datetime_frame) shiftedFrame2 = datetime_frame.shift(5, freq="B") tm.assert_frame_equal(shiftedFrame, shiftedFrame2) d = datetime_frame.index[0] shifted_d = d + offsets.BDay(5) tm.assert_series_equal( datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False ) # shift int frame int_shifted = int_frame.shift(1) # noqa # Shifting with PeriodIndex ps = tm.makePeriodFrame() shifted = ps.shift(1) unshifted = shifted.shift(-1) tm.assert_index_equal(shifted.index, ps.index) tm.assert_index_equal(unshifted.index, ps.index) tm.assert_numpy_array_equal( unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values ) shifted2 = ps.shift(1, "B") shifted3 = ps.shift(1, offsets.BDay()) tm.assert_frame_equal(shifted2, shifted3) tm.assert_frame_equal(ps, shifted2.shift(-1, "B")) msg = "does not match PeriodIndex freq" with pytest.raises(ValueError, match=msg): ps.shift(freq="D") # shift other axis # GH#6371 df = DataFrame(np.random.rand(10, 5)) expected = pd.concat( [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]], ignore_index=True, axis=1, ) result = df.shift(1, axis=1) tm.assert_frame_equal(result, expected) # shift named axis df = DataFrame(np.random.rand(10, 5)) expected = pd.concat( [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]], ignore_index=True, axis=1, ) result = df.shift(1, axis="columns") tm.assert_frame_equal(result, expected) def test_shift_bool(self): df = DataFrame({"high": [True, False], "low": [False, False]}) rs = df.shift(1) xp = DataFrame( np.array([[np.nan, np.nan], [True, False]], dtype=object), columns=["high", "low"], ) tm.assert_frame_equal(rs, xp) def test_shift_categorical(self): # GH#9416 s1 = Series(["a", "b", "c"], dtype="category") s2 = Series(["A", "B", "C"], dtype="category") df = DataFrame({"one": s1, "two": s2}) rs = df.shift(1) xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)}) tm.assert_frame_equal(rs, xp) def test_shift_fill_value(self): # GH#24128 df = DataFrame( [1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H") ) exp = DataFrame( [0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H") ) result = df.shift(1, fill_value=0) tm.assert_frame_equal(result, exp) exp = DataFrame( [0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H") ) result = df.shift(2, fill_value=0) tm.assert_frame_equal(result, exp) def test_shift_empty(self): # Regression test for GH#8019 df = DataFrame({"foo": []}) rs = df.shift(-1) tm.assert_frame_equal(df, rs) def test_shift_duplicate_columns(self): # GH#9092; verify that position-based shifting works # in the presence of duplicate columns column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]] data = 
np.random.randn(20, 5) shifted = [] for columns in column_lists: df = pd.DataFrame(data.copy(), columns=columns) for s in range(5): df.iloc[:, s] = df.iloc[:, s].shift(s + 1) df.columns = range(5) shifted.append(df) # sanity check the base case nulls = shifted[0].isna().sum() tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64")) # check all answers are the same tm.assert_frame_equal(shifted[0], shifted[1]) tm.assert_frame_equal(shifted[0], shifted[2]) def test_shift_axis1_multiple_blocks(self): # GH#35488 df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3))) df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2))) df3 = pd.concat([df1, df2], axis=1) assert len(df3._mgr.blocks) == 2 result = df3.shift(2, axis=1) expected = df3.take([-1, -1, 0, 1, 2], axis=1) expected.iloc[:, :2] = np.nan expected.columns = df3.columns tm.assert_frame_equal(result, expected) # Case with periods < 0 # rebuild df3 because `take` call above consolidated df3 = pd.concat([df1, df2], axis=1) assert len(df3._mgr.blocks) == 2 result = df3.shift(-2, axis=1) expected = df3.take([2, 3, 4, -1, -1], axis=1) expected.iloc[:, -2:] = np.nan expected.columns = df3.columns tm.assert_frame_equal(result, expected) @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_tshift(self, datetime_frame): # TODO: remove this test when tshift deprecation is enforced # PeriodIndex ps = tm.makePeriodFrame() shifted = ps.tshift(1) unshifted = shifted.tshift(-1) tm.assert_frame_equal(unshifted, ps) shifted2 = ps.tshift(freq="B") tm.assert_frame_equal(shifted, shifted2) shifted3 = ps.tshift(freq=offsets.BDay()) tm.assert_frame_equal(shifted, shifted3) msg = "Given freq M does not match PeriodIndex freq B" with pytest.raises(ValueError, match=msg): ps.tshift(freq="M") # DatetimeIndex shifted = datetime_frame.tshift(1) unshifted = shifted.tshift(-1) tm.assert_frame_equal(datetime_frame, unshifted) shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq) tm.assert_frame_equal(shifted, shifted2) inferred_ts = DataFrame( datetime_frame.values, Index(np.asarray(datetime_frame.index)), columns=datetime_frame.columns, ) shifted = inferred_ts.tshift(1) expected = datetime_frame.tshift(1) expected.index = expected.index._with_freq(None) tm.assert_frame_equal(shifted, expected) unshifted = shifted.tshift(-1) tm.assert_frame_equal(unshifted, inferred_ts) no_freq = datetime_frame.iloc[[0, 5, 7], :] msg = "Freq was not set in the index hence cannot be inferred" with pytest.raises(ValueError, match=msg): no_freq.tshift() def test_tshift_deprecated(self, datetime_frame): # GH#11631 with tm.assert_produces_warning(FutureWarning): datetime_frame.tshift() def test_period_index_frame_shift_with_freq(self): ps = tm.makePeriodFrame() shifted = ps.shift(1, freq="infer") unshifted = shifted.shift(-1, freq="infer") tm.assert_frame_equal(unshifted, ps) shifted2 = ps.shift(freq="B") tm.assert_frame_equal(shifted, shifted2) shifted3 = ps.shift(freq=
offsets.BDay()
pandas.offsets.BDay
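# A small illustration, independent of the test fixtures above, of the two
# shift behaviours these tests cover: shifting values (with a fill_value)
# versus shifting the index by a frequency.
import pandas as pd

df = pd.DataFrame(
    {"x": [1, 2, 3]}, index=pd.date_range("2000-01-03", periods=3, freq="B")
)

shifted_values = df.shift(1, fill_value=0)           # data moves, index fixed
shifted_index = df.shift(1, freq=pd.offsets.BDay())  # index moves, data fixed

assert shifted_values["x"].tolist() == [0, 1, 2]
assert (shifted_index["x"].values == df["x"].values).all()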
import unittest import pdb import pandas as pd import numpy as np from pandas.util.testing import assert_frame_equal, assert_index_equal from ..models.condition_models import RuleKPI, RuleCondition, RuleConditionalOperator, RuleConditionGroup, RuleConditionGroupOperator class Test_conditional_operator(unittest.TestCase): def setUp(self): """ Create sample data """ d = { "datum": pd.Series([3., 2., 1., np.nan]), "criterion": pd.Series([np.nan, 1., 2., 3.]), } self.df = pd.DataFrame(d) def test_greater(self): """ Test filtering by greater than """ operator = RuleConditionalOperator.greater index = operator.selectedIndex(self.df, "criterion", 2.) self.assertEqual(index, pd.Int64Index([3])) def test_less(self): """ Test filtering by greater than """ operator = RuleConditionalOperator.less index = operator.selectedIndex(self.df, "criterion", 2.) self.assertEqual(index, pd.Int64Index([1])) def test_greater_than_or_equal(self): """ Test filtering by greater than """ operator = RuleConditionalOperator.greaterThanOrEqual index = operator.selectedIndex(self.df, "criterion", 2.) assert_index_equal(index, pd.Int64Index([2, 3])) def test_less_than_or_equal(self): """ Test filtering by greater than """ operator = RuleConditionalOperator.lessThanOrEqual index = operator.selectedIndex(self.df, "criterion", 2.) assert_index_equal(index, pd.Int64Index([1, 2])) def test_equal(self): """ Test filtering by greater than """ operator = RuleConditionalOperator.equal index = operator.selectedIndex(self.df, "criterion", 2.) assert_index_equal(index, pd.Int64Index([2])) class Test_condiition(unittest.TestCase): """ Test module for the search ads condition classes """ def test_spend(self): """ Test filtering by total spend """ d = { "keywordId": pd.Series([1, 2, 1]), "localSpend": pd.Series([1., 3., 3.]), } df = pd.DataFrame(d) condition = RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("greater"), comparisonValue=3.) index = condition.selectedIndex(df, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([0, 2])) dataIndex = [0, 1, 2] d = { "keywordId": pd.Series([1, 2, 1], index=dataIndex), "localSpend": pd.Series([1., 3., 3.], index=dataIndex), "totalSpend": pd.Series([4., 3., 4.], index=dataIndex), } assert_frame_equal(df.sort_index(axis=1), pd.DataFrame(d).sort_index(axis=1)) def test_cpt(self): """ Test filtering by total CPT """ d = { "keywordId": pd.Series([1, 2, 1]), "localSpend": pd.Series([1., 3., 3.]), "taps": pd.Series([0, 0, 2.]), } df = pd.DataFrame(d) condition = RuleCondition(kpi=RuleKPI("reavgCPT"), operator=RuleConditionalOperator("less"), comparisonValue=3.) index = condition.selectedIndex(df, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([0, 2])) dataIndex = [0, 1, 2] d = { "keywordId": pd.Series([1, 2, 1], index=dataIndex), "localSpend": pd.Series([1., 3., 3.], index=dataIndex), "taps": pd.Series([0, 0, 2.], index=dataIndex), "totalSpend": pd.Series([4., 3., 4.], index=dataIndex), "reavgCPT": pd.Series([2., np.nan, 2.], index=dataIndex), "totalTaps": pd.Series([2., 0, 2.], index=dataIndex), } assert_frame_equal(df.sort_index(axis=1), pd.DataFrame(d).sort_index(axis=1)) def test_cpa(self): """ Test filtering by total CPA """ d = { "keywordId": pd.Series([1, 2, 1]), "localSpend": pd.Series([1., 3., 3.]), "installs": pd.Series([0, 1., 2.]), } df = pd.DataFrame(d) condition = RuleCondition(kpi=RuleKPI("reavgCPA"), operator=RuleConditionalOperator("less"), comparisonValue=3.) 
index = condition.selectedIndex(df, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([0, 2])) dataIndex = [0, 1, 2] d = { "keywordId": pd.Series([1, 2, 1], index=dataIndex), "localSpend": pd.Series([1., 3., 3.], index=dataIndex), "installs": pd.Series([0, 1., 2.], index=dataIndex), "totalSpend": pd.Series([4., 3., 4.], index=dataIndex), "reavgCPA": pd.Series([2., 3., 2.], index=dataIndex), "totalConversions": pd.Series([2., 1., 2.], index=dataIndex), } assert_frame_equal(df.sort_index(axis=1), pd.DataFrame(d).sort_index(axis=1)) class Test_condition_group_operator(unittest.TestCase): def setUp(self): """ Create sample data """ d = { "keywordId": pd.Series([1, 2, 3]), "localSpend": pd.Series([1., 2., 3.]), } self.df = pd.DataFrame(d) def test_all(self): """ Test filtering by all """ operator = RuleConditionGroupOperator.all conditions = [ RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("greater"), comparisonValue=1.), RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("less"), comparisonValue=3.), ] index = operator.selectedIndex(self.df, conditions=conditions, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([1])) def test_any(self): """ Test filtering by any """ operator = RuleConditionGroupOperator.any conditions = [ RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("greater"), comparisonValue=2.), RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("less"), comparisonValue=2.), ] index = operator.selectedIndex(self.df, conditions=conditions, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([0, 2])) class Test_condition_group(unittest.TestCase): def setUp(self): """ Create sample data """ d = { "keywordId": pd.Series([1, 2, 3]), "localSpend": pd.Series([1., 2., 3.]), } self.df = pd.DataFrame(d) def test_all(self): """ Test filtering by all """ subgroup = RuleConditionGroup(conditions=[RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("greater"), comparisonValue=1.)], subgroups=[], operator=RuleConditionGroupOperator.all) group = RuleConditionGroup(conditions=[RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("less"), comparisonValue=3.)], subgroups=[subgroup], operator=RuleConditionGroupOperator.all) index = group.selectedIndex(self.df, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([1])) def test_any(self): """ Test filtering by any """ subgroup = RuleConditionGroup(conditions=[RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator( "greater"), comparisonValue=2.)], subgroups=[], operator=RuleConditionGroupOperator.any) group = RuleConditionGroup(conditions=[RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator( "less"), comparisonValue=2.)], subgroups=[subgroup], operator=RuleConditionGroupOperator.any) index = group.selectedIndex(self.df, groupByID="keywordId") assert_index_equal(index, pd.Int64Index([0, 2])) def test_filter(self): """ Test filtering data """ subgroup = RuleConditionGroup(conditions=[RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("greater"), comparisonValue=1.)], subgroups=[], operator=RuleConditionGroupOperator.all) group = RuleConditionGroup(conditions=[RuleCondition(kpi=RuleKPI("totalSpend"), operator=RuleConditionalOperator("less"), comparisonValue=3.)], subgroups=[subgroup], operator=RuleConditionGroupOperator.all) group.filterData(self.df, groupByID="keywordId") dataIndex = [1] d = { "keywordId": 
pd.Series([2], index=dataIndex), "localSpend":
pd.Series([2.], index=dataIndex)
pandas.Series
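A small illustrative sketch (not taken from the test file above) of the pandas.Series pattern the completion fills in: building labelled columns for an expected frame with an explicit index.

import pandas as pd

data_index = [1]
expected = pd.DataFrame({
    "keywordId": pd.Series([2], index=data_index),
    "localSpend": pd.Series([2.0], index=data_index),
})
print(expected)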
from __future__ import division #brings in Python 3.0 mixed type calculation rules import datetime import inspect import numpy as np import numpy.testing as npt import os.path import pandas as pd import sys from tabulate import tabulate import unittest print("Python version: " + sys.version) print("Numpy version: " + np.__version__) # #find parent directory and import model # parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) # sys.path.append(parent_dir) from ..kabam_exe import Kabam test = {} class TestKabam(unittest.TestCase): """ Unit tests for Kabam model. : unittest will : 1) call the setup method, : 2) then call every method starting with "test", : 3) then the teardown method """ print("kabam unittests conducted at " + str(datetime.datetime.today())) def setUp(self): """ Setup routine for Kabam unit tests. :return: """ pass # setup the test as needed # e.g. pandas to open Kabam qaqc csv # Read qaqc csv and create pandas DataFrames for inputs and expected outputs def tearDown(self): """ Teardown routine for Kabam unit tests. :return: """ pass # teardown called after each test # e.g. maybe write test results to some text file def create_kabam_object(self): # create empty pandas dataframes to create empty object for testing df_empty = pd.DataFrame() # create an empty kabam object kabam_empty = Kabam(df_empty, df_empty) return kabam_empty def test_ventilation_rate(self): """ :description Ventilation rate of aquatic animal :unit L/d :expression Kabam Eq. A5.2b (Gv) :param zoo_wb: wet weight of animal (kg) :param conc_do: concentration of dissolved oxygen (mg O2/L) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float') try: #use the zooplankton variables/values for the test kabam_empty.zoo_wb = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float') kabam_empty.conc_do = pd.Series([5.0, 10.0, 7.5], dtype='float') result = kabam_empty.ventilation_rate(kabam_empty.zoo_wb) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_pest_uptake_eff_gills(self): """ :description Pesticide uptake efficiency by gills :unit fraction "expresssion Kabam Eq. A5.2a (Ew) :param log kow: octanol-water partition coefficient () :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series(['nan', 0.540088, 0.540495], dtype = 'float') try: kabam_empty.log_kow = pd.Series(['nan', 5., 6.], dtype = 'float') kabam_empty.kow = 10.**(kabam_empty.log_kow) result = kabam_empty.pest_uptake_eff_bygills() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_phytoplankton_k1_calc(self): """ :description Uptake rate constant through respiratory area for phytoplankton :unit: L/kg*d :expression Kabam Eq. 
A5.1 (K1:unique to phytoplankton) :param log kow: octanol-water partition coefficient () :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float') try: kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype = 'float') kabam_empty.kow = 10.**(kabam_empty.log_kow) result = kabam_empty.phytoplankton_k1_calc(kabam_empty.kow) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_aq_animal_k1_calc(self): """ U:description ptake rate constant through respiratory area for aquatic animals :unit: L/kg*d :expression Kabam Eq. A5.2 (K1) :param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction) :param vent_rate: Ventilation rate of aquatic animal (L/d) :param wet_wgt: wet weight of animal (kg) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series(['nan', 1201.13849, 169.37439], dtype = 'float') try: pest_uptake_eff_bygills = pd.Series(['nan', 0.0304414, 0.0361228], dtype = 'float') vent_rate = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float') wet_wgt = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float') result = kabam_empty.aq_animal_k1_calc(pest_uptake_eff_bygills, vent_rate, wet_wgt) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_animal_water_part_coef(self): """ :description Organism-Water partition coefficient (based on organism wet weight) :unit () :expression Kabam Eq. 
A6a (Kbw) :param zoo_lipid: lipid fraction of organism (kg lipid/kg organism wet weight) :param zoo_nlom: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight) :param zoo_water: water content of organism (kg water/kg organism wet weight) :param kow: octanol-water partition coefficient () :param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to that of octanol :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float') try: #For test purpose we'll use the zooplankton variable names kabam_empty.zoo_lipid_frac = pd.Series([0.03, 0.04, 0.06], dtype = 'float') kabam_empty.zoo_nlom_frac = pd.Series([0.10, 0.20, 0.30,], dtype = 'float') kabam_empty.zoo_water_frac = pd.Series([0.87, 0.76, 0.64], dtype = 'float') kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float') beta = 0.35 result = kabam_empty.animal_water_part_coef(kabam_empty.zoo_lipid_frac, kabam_empty.zoo_nlom_frac, kabam_empty.zoo_water_frac, beta) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_aq_animal_k2_calc(self): """ :description Elimination rate constant through the respiratory area :unit (per day) :expression Kabam Eq. A6 (K2) :param zoo_k1: Uptake rate constant through respiratory area for aquatic animals :param k_bw_zoo (Kbw): Organism-Water partition coefficient (based on organism wet weight () :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([2.5186969, 0.79045921, 0.09252798], dtype = 'float') try: #For test purpose we'll use the zooplankton variable names kabam_empty.zoo_k1 = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float') kabam_empty.k_bw_zoo = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float') result = kabam_empty.aq_animal_k2_calc(kabam_empty.zoo_k1, kabam_empty.k_bw_zoo) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_animal_grow_rate_const(self): """ :description Aquatic animal/organism growth rate constant :unit (per day) :expression Kabam Eq. 
A7.1 & A7.2 :param zoo_wb: wet weight of animal/organism (kg) :param water_temp: water temperature (degrees C) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.01255943, 0.00125594, 0.00251], dtype = 'float') try: #For test purpose we'll use the zooplankton variable names kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0], dtype = 'float') kabam_empty.water_temp = pd.Series([10., 15., 20.], dtype = 'float') result = kabam_empty.animal_grow_rate_const(kabam_empty.zoo_wb) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_dietary_trans_eff(self): """ :description Aquatic animal/organizm dietary pesticide transfer efficiency :unit fraction :expression Kabam Eq. A8a (Ed) :param kow: octanol-water partition coefficient () :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float') try: kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float') result = kabam_empty.dietary_trans_eff() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_aq_animal_feeding_rate(self): """ :description Aquatic animal feeding rate (except filterfeeders) :unit kg/d :expression Kabam Eq. A8b1 (Gd) :param wet_wgt: wet weight of animal/organism (kg) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float') try: #For test purpose we'll use the zooplankton variable names kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.], dtype = 'float') kabam_empty.water_temp = pd.Series([10., 15., 20.]) result = kabam_empty.aq_animal_feeding_rate(kabam_empty.zoo_wb) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_filterfeeder_feeding_rate(self): """ :description Filter feeder feeding rate :unit kg/d :expression Kabam Eq. 
A8b2 (Gd) :param self.gv_filterfeeders: filterfeeder ventilation rate (L/d) :param self.conc_ss: Concentration of Suspended Solids (Css - kg/L) :param particle_scav_eff: efficiency of scavenging of particles absorbed from water (fraction) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series(['nan', 1.97287e-7, 0.03282195], dtype = 'float') try: kabam_empty.gv_filterfeeders = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float') kabam_empty.conc_ss = pd.Series([0.00005, 0.00005, 0.07], dtype = 'float') kabam_empty.particle_scav_eff = 1.0 result = kabam_empty.filterfeeders_feeding_rate() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_diet_uptake_rate_const(self): """ :description pesticide uptake rate constant for uptake through ingestion of food rate :unit kg food/kg organism - day :expression Kabam Eq. A8 (kD) :param dietary_trans_eff: dietary pesticide transfer efficiency (fraction) :param feeding rate: animal/organism feeding rate (kg/d) :param wet weight of aquatic animal/organism (kg) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.22455272, 0.05318532, 0.031755767 ], dtype = 'float') try: #For test purpose we'll use the zooplankton variable names kabam_empty.ed_zoo = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float') kabam_empty.gd_zoo = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float') kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0]) result = kabam_empty.diet_uptake_rate_const(kabam_empty.ed_zoo, \ kabam_empty.gd_zoo, kabam_empty.zoo_wb) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_overall_diet_content(self): """ :description Overall fraction of aquatic animal/organism diet attributed to diet food component (i.e., lipids or NLOM or water) :unit kg/kg :expression not shown in Kabam documentation: it is associated with Kabam Eq. 
A9 overall_diet_content is equal to the sum over dietary elements : of (fraction of diet) * (content in diet element); for example zooplankton ingest seidment and : phytoplankton, thus the overall lipid content of the zooplankton diet equals : (fraction of sediment in zooplankton diet) * (fraction of lipids in sediment) + : (fraction of phytoplankton in zooplankton diet) * (fraction of lipids in phytoplankton) :param diet_fraction: list of values representing fractions of aquatic animal/organism diet attibuted to each element of diet :param content_fraction: list of values representing fraction of diet element attributed to a specific component of that diet element (e.g., lipid, NLOM, or water) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.025, 0.03355, 0.0465], dtype = 'float') try: #For test purposes we'll use the small fish diet variables/values kabam_empty.sfish_diet_sediment = pd.Series([0.0, 0.01, 0.05], dtype = 'float') kabam_empty.sfish_diet_phytoplankton = pd.Series([0.0, 0.01, 0.05], dtype = 'float') kabam_empty.sfish_diet_zooplankton = pd.Series([0.5, 0.4, 0.5], dtype = 'float') kabam_empty.sfish_diet_benthic_invertebrates = pd.Series([0.5, 0.57, 0.35], dtype = 'float') kabam_empty.sfish_diet_filterfeeders = pd.Series([0.0, 0.01, 0.05], dtype = 'float') kabam_empty.sediment_lipid = pd.Series([0.0, 0.01, 0.0], dtype = 'float') kabam_empty.phytoplankton_lipid = pd.Series([0.02, 0.015, 0.03], dtype = 'float') kabam_empty.zoo_lipid = pd.Series([0.03, 0.04, 0.05], dtype = 'float') kabam_empty.beninv_lipid = pd.Series([0.02, 0.03, 0.05], dtype = 'float') kabam_empty.filterfeeders_lipid = pd.Series([0.01, 0.02, 0.05], dtype = 'float') diet_elements = pd.Series([], dtype = 'float') content_fracs = pd.Series([], dtype = 'float') for i in range(len(kabam_empty.sfish_diet_sediment)): diet_elements = [kabam_empty.sfish_diet_sediment[i], kabam_empty.sfish_diet_phytoplankton[i], kabam_empty.sfish_diet_zooplankton[i], kabam_empty.sfish_diet_benthic_invertebrates[i], kabam_empty.sfish_diet_filterfeeders[i]] content_fracs = [kabam_empty.sediment_lipid[i], kabam_empty.phytoplankton_lipid[i], kabam_empty.zoo_lipid[i], kabam_empty.beninv_lipid[i], kabam_empty.filterfeeders_lipid[i]] result[i] = kabam_empty.overall_diet_content(diet_elements, content_fracs) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_fecal_egestion_rate_factor(self): """ Aquatic animal/organism egestion rate of fecal matter factor (to be multiplied by the feeding rate to calculate egestion rate of fecal matter) :unit (kg feces)/[(kg organism) - day] :expression Kabam Eq. 
A9 (GF) :param epsilonL: dietary assimilation rate of lipids (fraction) :param epsilonN: dietary assimilation rate of NLOM (fraction) :param epsilonW: dietary assimilation rate of water (fraction) :param diet_lipid; lipid content of aquatic animal/organism diet (fraction) :param diet_nlom NLOM content of aquatic animal/organism diet (fraction) :param diet_water water content of aquatic animal/organism diet (fraction) :param feeding_rate: aquatic animal/organism feeding rate (kg/d) :return: """ #this test includes two results; 'result1' represents the overall assimilation rate of the #aquatic animal/organism diet; and 'result' represents the product of this assimilation rate #and the feeding rate (this multiplication will be done in the main model routine #as opposed to within a method -- the method here is limited to the assimilation factor #because this factor is used elsewhere as well # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') result1 = pd.Series([], dtype='float') expected_results = pd.Series([1.43e-9, 5.005e-5, 4.82625e-3], dtype = 'float') try: #For test purposes we'll use the zooplankton variable names and relevant constant values kabam_empty.epsilon_lipid_zoo = 0.72 kabam_empty.epsilon_nlom_zoo = 0.60 kabam_empty.epsilon_water = 0.25 kabam_empty.v_ld_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float') kabam_empty.v_nd_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float') kabam_empty.v_wd_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float') kabam_empty.gd_zoo = pd.Series([4.e-08, 1.e-3, 0.075], dtype = 'float') result1 = kabam_empty.fecal_egestion_rate_factor(kabam_empty.epsilon_lipid_zoo, kabam_empty.epsilon_nlom_zoo, kabam_empty.epsilon_water, kabam_empty.v_ld_zoo, kabam_empty.v_nd_zoo, kabam_empty.v_wd_zoo) result = result1 * kabam_empty.gd_zoo npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_diet_elements_gut(self): """ Fraction of diet elements (i.e., lipid, NLOM, water) in the gut :unit (kg lipid) / (kg digested wet weight) :expression Kabam Eq. 
A9 (VLG, VNG, VWG) :param (epison_lipid_*) relevant dietary assimilation rate (fraction) :param (v_ld_*) relevant overall diet content of diet element (kg/kg) :param (diet_assim_factor_*) relevant: Aquatic animal/organism egestion rate of fecal matter factor :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.2, 0.196, 0.1575], dtype = 'float') try: #for this test we'll use the lipid content for zooplankton kabam_empty.epsilon_lipid_zoo = 0.72 kabam_empty.v_ld_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float') kabam_empty.diet_assim_factor_zoo = pd.Series([0.035, 0.05, 0.08], dtype = 'float') result = kabam_empty.diet_elements_gut(kabam_empty.epsilon_lipid_zoo, kabam_empty.v_ld_zoo, kabam_empty.diet_assim_factor_zoo) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_gut_organism_partition_coef(self): """ Partition coefficient of the pesticide between the gastrointenstinal track and the organism :unit none :expression Kabam Eq. A9 (KGB) :param vlg_zoo: lipid content in the gut :param vng_zoo: nlom content in the gut :param vwg_zoo: water content in the gut :param kow: pesticide Kow :param beta_aq_animals: proportionality constant expressing the sorption capacity of NLOM to that of octanol :param zoo_lipid_frac: lipid content in the whole organism :param zoo_nlom_frac: nlom content in the whole organism :param zoo_water_frac: water content in the whole organism :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.991233, 1.662808, 1.560184], dtype = 'float') try: #for this test we'll use the zooplankton varialbles kabam_empty.beta_aq_animals = 0.035 kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float') kabam_empty.vlg_zoo = pd.Series([0.2, 0.25, 0.15], dtype = 'float') kabam_empty.vng_zoo = pd.Series([0.1, 0.15, 0.25], dtype = 'float') kabam_empty.vwg_zoo = pd.Series([0.15, 0.35, 0.05], dtype = 'float') kabam_empty.zoo_lipid_frac = pd.Series([0.20, 0.15, 0.10], dtype = 'float') kabam_empty.zoo_nlom_frac = pd.Series([0.15, 0.10, 0.05], dtype = 'float') kabam_empty.zoo_water_frac = pd.Series([0.65, 0.75, 0.85], dtype = 'float') result = kabam_empty.gut_organism_partition_coef(kabam_empty.vlg_zoo, kabam_empty.vng_zoo, kabam_empty.vwg_zoo, kabam_empty.kow, kabam_empty.beta_aq_animals, kabam_empty.zoo_lipid_frac, kabam_empty.zoo_nlom_frac, kabam_empty.zoo_water_frac) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_fecal_elim_rate_const(self): """ rate constant for elimination of the pesticide through excretion of contaminated feces :unit per day :param gf_zoo: egestion rate of fecal matter (kg feces)/(kg organism-day) :param ed_zoo: dietary pesticide transfer efficiency (fraction) :param kgb_zoo: gut - partition coefficient of the pesticide between the gastrointestinal tract and the organism (-) :param zoo_wb: wet weight of organism (kg) :return: """ # create empty pandas dataframes 
to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([7.5e-4, 0.0525, 5.625e-4], dtype = 'float') try: #for this test we'll use the zooplankton variables kabam_empty.gf_zoo = pd.Series([1.5e-9, 5.0e-5, 4.5e-3], dtype = 'float') kabam_empty.ed_zoo = pd.Series([0.5, 0.7, 0.25], dtype = 'float') kabam_empty.kgb_zoo = pd.Series([1.0, 1.5, 0.5], dtype = 'float') kabam_empty.zoo_wb = pd.Series([1.e-6, 1.e-3, 1.0], dtype = 'float') result = kabam_empty.fecal_elim_rate_const(kabam_empty.gf_zoo, kabam_empty.ed_zoo, kabam_empty.kgb_zoo, kabam_empty.zoo_wb) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_frac_pest_freely_diss(self): """ Calculate Fraction of pesticide freely dissolved in water column (that can be absorbed via membrane diffusion) :unit fraction :expression Kabam Eq. A2 :param conc_poc: Concentration of Particulate Organic Carbon in water column (kg OC/L) :param kow: octonal-water partition coefficient (-) :param conc_doc: Concentration of Dissolved Organic Carbon in water column (kg OC/L) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.13422819, 0.00462963, 0.00514139], dtype = 'float') try: #for this test we'll use the zooplankton variables kabam_empty.conc_poc = pd.Series([1.5e-3, 5.0e-3, 4.5e-4], dtype = 'float') kabam_empty.alpha_poc = 0.35 kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float') kabam_empty.conc_doc = pd.Series([1.5e-3, 5.0e-3, 4.5e-4], dtype = 'float') kabam_empty.alpha_doc = 0.08 result = kabam_empty.frac_pest_freely_diss() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_conc_freely_diss_watercol(self): """ concentration of freely dissolved pesticide in overlying water column :unit g/L :param phi: Fraction of pesticide freely dissolved in water column (that can be absorbed via membrane diffusion) (fraction) :param water_column_eec: Water Column 1-in-10 year EECs (ug/L) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([1.e-1, 2.4e-2, 1.], dtype = 'float') try: #for this test we'll use the zooplankton variables kabam_empty.phi = pd.Series([0.1, 0.004, 0.05], dtype = 'float') kabam_empty.water_column_eec = pd.Series([1., 6., 20.], dtype = 'float') result = kabam_empty.conc_freely_diss_watercol() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_conc_sed_norm_4oc(self): """ pesticide concentration in sediment normalized for organic carbon :unit g/(kg OC) :expression Kabam Eq. 
A4a :param pore_water_eec: freely dissolved pesticide concentration in sediment pore water :param k_oc: organic carbon partition coefficient (L/kg OC) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([2.5e4, 6.e4, 2.e6], dtype = 'float') try: #for this test we'll use the zooplankton variables kabam_empty.k_oc = pd.Series([2.5e4, 1.e4, 1.e5], dtype = 'float') kabam_empty.pore_water_eec = pd.Series([1., 6., 20.], dtype = 'float') result = kabam_empty.conc_sed_norm_4oc() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_conc_sed_dry_wgt(self): """ Calculate concentration of chemical in solid portion of sediment :unit g/(kg dry) :expression Kabam Eq. A4 :param c_soc: pesticide concentration in sediment normalized for organic carbon g/(kg OC) :param sediment_oc: fraction organic carbon in sediment (fraction) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.001, 0.0036, 0.4], dtype = 'float') try: #for this test we'll use the zooplankton variables kabam_empty.c_soc = pd.Series([0.025, 0.06, 2.00], dtype = 'float') kabam_empty.sediment_oc = pd.Series([4., 6., 20.], dtype = 'float') kabam_empty.sediment_oc_frac = kabam_empty.percent_to_frac(kabam_empty.sediment_oc) result = kabam_empty.conc_sed_dry_wgt() npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_diet_pest_conc(self): """ overall concentration of pesticide in aquatic animal/organism diet :unit g/(kg wet weight) :expression Kabam Eq. A1 (SUM(Pi * CDi); :param diet_frac_lfish: fraction of large fish diet containing prey i (Pi in Eq. A1)) :param diet_conc_lfish: concentraiton of pesticide in prey i (CDi in Eq. 
A1) :param lipid_content_lfish: fraction of prey i that is lipid :notes for this test we populate all prey items for large fish even though large fish typically only consume medium fish :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') result1 = pd.Series([], dtype='float') result2 = pd.Series([], dtype='float') expected_results1 = pd.Series([0.2025, 0.2025, 0.205], dtype = 'float') expected_results2 = pd.Series([5.316667, 4.819048, 4.3], dtype = 'float') try: #for this test we'll use the large fish variables (there are 7 prey items listed #for large fish (sediment, phytoplankton, zooplankton, benthic invertebrates, # filterfeeders, small fish, and medium fish --- this is the order related #to the values in the two series below) kabam_empty.diet_frac_lfish = pd.Series([[0.02, 0.03, 0.10, 0.05, 0.10, 0.7], [0.0, 0.05, 0.05, 0.05, 0.10, 0.75], [0.01, 0.02, 0.03, 0.04, 0.10, 0.8]], dtype = 'float') kabam_empty.diet_conc_lfish = pd.Series([[0.10, 0.10, 0.20, 0.15, 0.30, 0.20], [0.10, 0.10, 0.20, 0.15, 0.30, 0.20], [0.10, 0.10, 0.20, 0.15, 0.30, 0.20]], dtype = 'float') kabam_empty.diet_lipid_content_lfish = pd.Series([[0.0, 0.02, 0.03, 0.03, 0.04, 0.04], [0.01, 0.025, 0.035, 0.03, 0.04, 0.045], [0.0, 0.02, 0.03, 0.03, 0.05, 0.05]], dtype = 'float') result1,result2 = kabam_empty.diet_pest_conc(kabam_empty.diet_frac_lfish, kabam_empty.diet_conc_lfish, kabam_empty.diet_lipid_content_lfish) npt.assert_allclose(result1, expected_results1, rtol=1e-4, atol=0, err_msg='', verbose=True) npt.assert_allclose(result2, expected_results2, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result1, expected_results1, result2, expected_results2] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_pest_conc_organism(self): """ concentration of pesticide in aquatic animal/organism :unit g/(kg wet weight) :expression Kabam Eq. 
A1 (CB) :param lfish_k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d) :param lfish_k2: rate constant for elimination of the peisticide through the respiratory area (gills, skin) (/d) :param lfish_kd: pesticide uptake rate constant for uptake through ingestion of food (kg food/(kg organism - day) :param lfish_ke: rate constant for elimination of the pesticide through excretion of feces (/d) :param lfish_kg: animal/organism growth rate constant (/d) :param lfish_km: rate constant for pesticide metabolic transformation (/d) :param lfish_mp: fraction of respiratory ventilation that involves por-water of sediment (fraction) :param lfish_mo: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction) :param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed via membrane diffusion (fraction) :param cwto: total pesticide concentraiton in water column above sediment (g/L) :param pore_water_eec: freely dissovled pesticide concentration in pore-water of sediment (g/L) :param total_diet_conc_lfish: concentration of pesticide in overall diet of aquatic animal/organism (g/kg wet weight) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([1.97044e-3, 1.85185e-3, 3.97389e-3], dtype = 'float') try: kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float') kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float') kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float') #for this test we'll use the large fish variables (and values that may not specifically apply to large fish kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float') kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float') kabam_empty.lfish_kd = pd.Series([0.05, 0.03, 0.02], dtype = 'float') kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float') kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float') kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float') kabam_empty.lfish_mp = pd.Series([0.0, 0.0, 0.05], dtype = 'float') kabam_empty.lfish_mo = pd.Series([1.0, 1.0, 0.95], dtype = 'float') kabam_empty.total_diet_conc_lfish = pd.Series( [.20, .30, .50], dtype = 'float') result = kabam_empty.pest_conc_organism(kabam_empty.lfish_k1, kabam_empty.lfish_k2, kabam_empty.lfish_kd, kabam_empty.lfish_ke, kabam_empty.lfish_kg, kabam_empty.lfish_km, kabam_empty.lfish_mp, kabam_empty.lfish_mo, kabam_empty.total_diet_conc_lfish) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_lipid_norm_residue_conc(self): """ Lipid normalized pesticide residue in aquatic animal/organism :unit ug/kg-lipid :expresssion represents a factor (CB/VLB) used in Kabam Eqs. 
F4, F5, & F6 :param cb_lfish: total pesticide concentration in animal/organism (g/kg-ww) :param lfish_lipid_frac: fraction of animal/organism that is lipid (fraction) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.025, 0.00833333, 0.0005], dtype = 'float') try: #for this test we'll use the large fish variables kabam_empty.out_cb_lfish = pd.Series([1.e-3, 5.e-4, 1.e-5], dtype = 'float') kabam_empty.lfish_lipid_frac = pd.Series([0.04, 0.06, 0.02], dtype = 'float') kabam_empty.gms_to_microgms = 1.e6 result = kabam_empty.lipid_norm_residue_conc(kabam_empty.out_cb_lfish, kabam_empty.lfish_lipid_frac) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_pest_conc_diet_uptake(self): """ :description Pesticide concentration in animal/organism originating from uptake through diet :unit g/kg ww :expression Kabam A1 (with k1 = 0) :param lfish_kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/kg organizm - day) :param total_diet_conc: overall concentration of pesticide in diet of animal/organism (g/kg-ww) :param lfish_k2: rate constant for elimination of the peisticide through the respiratory area (gills, skin) (/d) :param lfish_kE: rate constant for elimination of the pesticide through excretion of feces (/d) :param lfish_kG: animal/organism growth rate constant (/d) :param lfish_kM: rate constant for pesticide metabolic transformation (/d) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([9.8522e-4, 1.75439e-3, 2.83849e-3], dtype = 'float') try: #for this test we'll use the large fish variables (and values that may not specifically apply to large fish kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float') kabam_empty.lfish_kd = pd.Series([0.05, 0.03, 0.02], dtype = 'float') kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float') kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float') kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float') kabam_empty.total_diet_conc_lfish = pd.Series( [.20, .30, .50], dtype = 'float') result = kabam_empty.pest_conc_diet_uptake(kabam_empty.lfish_kd, kabam_empty.lfish_k2, kabam_empty.lfish_ke, kabam_empty.lfish_kg, kabam_empty.lfish_km, kabam_empty.total_diet_conc_lfish) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_pest_conc_respir_uptake(self): """ :description Pesticide concentration in animal/organism originating from uptake through respiration :unit g/kg ww :expression Kabam A1 (with kD = 0) :param lfish_k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d) :param lfish_k2: rate constant for elimination of the peisticide through the respiratory area (gills, skin) (/d) :param lfish_kE: rate constant for elimination of the pesticide through excretion of feces (/d) :param lfish_kG: animal/organism growth rate constant (/d) :param lfish_kM: rate constant 
for pesticide metabolic transformation (/d) :param lfish_mP: fraction of respiratory ventilation that involves por-water of sediment (fraction) :param lfish_mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction) :param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed via membrane diffusion (fraction) :param water_column_eec: total pesticide concentraiton in water column above sediment (g/L) :param pore_water_eec: freely dissovled pesticide concentration in pore-water of sediment (g/L) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([9.8522167e-4, 9.746588e-5, 1.1353959e-3], dtype = 'float') try: kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float') kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float') kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float') #for this test we'll use the large fish variables (and values that may not specifically apply to large fish kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float') kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float') kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float') kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float') kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float') kabam_empty.lfish_mp = pd.Series([0.0, 0.0, 0.05], dtype = 'float') kabam_empty.lfish_mo = pd.Series([1.0, 1.0, 0.95], dtype = 'float') result = kabam_empty.pest_conc_respir_uptake(kabam_empty.lfish_k1, kabam_empty.lfish_k2, kabam_empty.lfish_ke, kabam_empty.lfish_kg, kabam_empty.lfish_km, kabam_empty.lfish_mp, kabam_empty.lfish_mo) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_tot_bioconc_fact(self): """ :description Total bioconcentration factor :unit (ug pesticide/kg ww) / (ug pesticide/L water) :expression Kabam Eq. 
F1 :param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d) :param k2: rate constant for elimination of the peisticide through the respiratory area (gills, skin) (/d) :param mP: fraction of respiratory ventilation that involves por-water of sediment (fraction) :param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction) :param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed via membrane diffusion (fraction) :param water_column_eec: total pesticide concentraiton in water column above sediment (g/L) :param pore_water_eec: freely dissovled pesticide concentration in pore-water of sediment (g/L) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([0.955, 1.00, 0.6666667], dtype = 'float') try: kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float') kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float') kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float') #for this test we'll use the large fish variables (and values that may not specifically apply to large fish kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float') kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float') kabam_empty.lfish_mp = pd.Series([0.05, 0.0, 0.05], dtype = 'float') kabam_empty.lfish_mo = pd.Series([0.95, 1.0, 0.95], dtype = 'float') result = kabam_empty.tot_bioconc_fact(kabam_empty.lfish_k1, kabam_empty.lfish_k2, kabam_empty.lfish_mp, kabam_empty.lfish_mo) npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True) finally: tab = [result, expected_results] print("\n") print(inspect.currentframe().f_code.co_name) print(tabulate(tab, headers='keys', tablefmt='rst')) return def test_lipid_norm_bioconc_fact(self): """ :description Lipid normalized bioconcentration factor :unit (ug pesticide/kg lipid) / (ug pesticide/L water) :expression Kabam Eq. F2 :param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d) :param k2: rate constant for elimination of the peisticide through the respiratory area (gills, skin) (/d) :param mP: fraction of respiratory ventilation that involves por-water of sediment (fraction) :param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction) :param lfish_lipid: fraction of animal/organism that is lipid (fraction) :param out_free_pest_conc_watercol: freely dissolved pesticide concentraiton in water column above sediment (g/L) :param pore_water_eec: freely dissovled pesticide concentration in pore-water of sediment (g/L) :return: """ # create empty pandas dataframes to create empty object for this unittest kabam_empty = self.create_kabam_object() result = pd.Series([], dtype='float') expected_results = pd.Series([47.75, 25.0, 11.1111], dtype = 'float') try: kabam_empty.out_free_pest_conc_watercol = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float') kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float') #for this test we'll use the large fish variables (and values that may not specifically apply to large fish kabam_empty.lfish_k1 =
pd.Series([10., 5., 2.], dtype = 'float')
pandas.Series
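Illustrative sketch only (the real Kabam methods are not reproduced here): the unit tests above build float pandas.Series inputs and compare a computed Series against expected values with numpy.testing, as in this reduced pattern.

import numpy as np
import numpy.testing as npt
import pandas as pd

lfish_k1 = pd.Series([10.0, 5.0, 2.0], dtype="float")   # example uptake rate constants
lfish_k2 = pd.Series([10.0, 5.0, 3.0], dtype="float")   # example elimination rate constants
result = lfish_k1 / lfish_k2                             # stand-in for a model method
npt.assert_allclose(result, [1.0, 1.0, 2.0 / 3.0], rtol=1e-4)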
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# ------------- read csv ---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 =
pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
pandas.read_csv
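A minimal sketch of the pandas.read_csv call the completion supplies; the filename below is a placeholder, not a path from the original script.

import pandas as pd

# placeholder path; the original script reads the 2018-2019 extract
df_2018_2019 = pd.read_csv("data_example_2018_2019.csv")
print(df_2018_2019.shape)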
# -*- coding: utf-8 -*- import numpy as np from pandas import Series, DataFrame, Index, Float64Index from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm class TestFloatIndexers(tm.TestCase): def check(self, result, original, indexer, getitem): """ comparator for results we need to take care if we are indexing on a Series or a frame """ if isinstance(original, Series): expected = original.iloc[indexer] else: if getitem: expected = original.iloc[:, indexer] else: expected = original.iloc[indexer] assert_almost_equal(result, expected) def test_scalar_error(self): # GH 4892 # float_indexers should raise exceptions # on appropriate Index types & accessors # this duplicates the code below # but is spefically testing for the error # message for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeCategoricalIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex, tm.makeIntIndex, tm.makeRangeIndex]: i = index(5) s = Series(np.arange(len(i)), index=i) def f(): s.iloc[3.0] self.assertRaisesRegexp(TypeError, 'cannot do positional indexing', f) def f(): s.iloc[3.0] = 0 self.assertRaises(TypeError, f) def test_scalar_non_numeric(self): # GH 4892 # float_indexers should raise exceptions # on appropriate Index types & accessors for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeCategoricalIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex]: i = index(5) for s in [Series( np.arange(len(i)), index=i), DataFrame( np.random.randn( len(i), len(i)), index=i, columns=i)]: # getting for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.iloc, False), (lambda x: x, True)]: def f(): idxr(s)[3.0] # gettitem on a DataFrame is a KeyError as it is indexing # via labels on the columns if getitem and isinstance(s, DataFrame): error = KeyError else: error = TypeError self.assertRaises(error, f) # label based can be a TypeError or KeyError def f(): s.loc[3.0] if s.index.inferred_type in ['string', 'unicode', 'mixed']: error = KeyError else: error = TypeError self.assertRaises(error, f) # contains self.assertFalse(3.0 in s) # setting with a float fails with iloc def f(): s.iloc[3.0] = 0 self.assertRaises(TypeError, f) # setting with an indexer if s.index.inferred_type in ['categorical']: # Value or Type Error pass elif s.index.inferred_type in ['datetime64', 'timedelta64', 'period']: # these should prob work # and are inconsisten between series/dataframe ATM # for idxr in [lambda x: x.ix, # lambda x: x]: # s2 = s.copy() # def f(): # idxr(s2)[3.0] = 0 # self.assertRaises(TypeError, f) pass else: s2 = s.copy() s2.loc[3.0] = 10 self.assertTrue(s2.index.is_object()) for idxr in [lambda x: x.ix, lambda x: x]: s2 = s.copy() idxr(s2)[3.0] = 0 self.assertTrue(s2.index.is_object()) # fallsback to position selection, series only s = Series(np.arange(len(i)), index=i) s[3] self.assertRaises(TypeError, lambda: s[3.0]) def test_scalar_with_mixed(self): s2 = Series([1, 2, 3], index=['a', 'b', 'c']) s3 = Series([1, 2, 3], index=['a', 'b', 1.5]) # lookup in a pure string index # with an invalid indexer for idxr in [lambda x: x.ix, lambda x: x, lambda x: x.iloc]: def f(): idxr(s2)[1.0] self.assertRaises(TypeError, f) self.assertRaises(KeyError, lambda: s2.loc[1.0]) result = s2.loc['b'] expected = 2 self.assertEqual(result, expected) # mixed index so we have label # indexing for idxr in [lambda x: x.ix, lambda x: x]: def f(): idxr(s3)[1.0] self.assertRaises(TypeError, f) result = idxr(s3)[1] expected = 2 self.assertEqual(result, expected) 
self.assertRaises(TypeError, lambda: s3.iloc[1.0]) self.assertRaises(KeyError, lambda: s3.loc[1.0]) result = s3.loc[1.5] expected = 3 self.assertEqual(result, expected) def test_scalar_integer(self): # test how scalar float indexers work on int indexes # integer index for index in [tm.makeIntIndex, tm.makeRangeIndex]: i = index(5) for s in [Series(np.arange(len(i))), DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i)]: # coerce to equal int for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.loc, False), (lambda x: x, True)]: result = idxr(s)[3.0] self.check(result, s, 3, getitem) # coerce to equal int for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.loc, False), (lambda x: x, True)]: if isinstance(s, Series): compare = self.assertEqual expected = 100 else: compare = tm.assert_series_equal if getitem: expected = Series(100, index=range(len(s)), name=3) else: expected = Series(100., index=range(len(s)), name=3) s2 = s.copy() idxr(s2)[3.0] = 100 result = idxr(s2)[3.0] compare(result, expected) result = idxr(s2)[3] compare(result, expected) # contains # coerce to equal int self.assertTrue(3.0 in s) def test_scalar_float(self): # scalar float indexers work on a float index index = Index(np.arange(5.)) for s in [Series(np.arange(len(index)), index=index), DataFrame(np.random.randn(len(index), len(index)), index=index, columns=index)]: # assert all operations except for iloc are ok indexer = index[3] for idxr, getitem in [(lambda x: x.ix, False), (lambda x: x.loc, False), (lambda x: x, True)]: # getting result = idxr(s)[indexer] self.check(result, s, 3, getitem) # setting s2 = s.copy() def f(): idxr(s2)[indexer] = expected result = idxr(s2)[indexer] self.check(result, s, 3, getitem) # random integer is a KeyError self.assertRaises(KeyError, lambda: idxr(s)[3.5]) # contains self.assertTrue(3.0 in s) # iloc succeeds with an integer expected = s.iloc[3] s2 = s.copy() s2.iloc[3] = expected result = s2.iloc[3] self.check(result, s, 3, False) # iloc raises with a float self.assertRaises(TypeError, lambda: s.iloc[3.0]) def g(): s2.iloc[3.0] = 0 self.assertRaises(TypeError, g) def test_slice_non_numeric(self): # GH 4892 # float_indexers should raise exceptions # on appropriate Index types & accessors for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex]: index = index(5) for s in [Series(range(5), index=index), DataFrame(np.random.randn(5, 2), index=index)]: # getitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: def f(): s.iloc[l] self.assertRaises(TypeError, f) for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x.iloc, lambda x: x]: def f(): idxr(s)[l] self.assertRaises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: def f(): s.iloc[l] = 0 self.assertRaises(TypeError, f) for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x.iloc, lambda x: x]: def f(): idxr(s)[l] = 0 self.assertRaises(TypeError, f) def test_slice_integer(self): # same as above, but for Integer based indexes # these coerce to a like integer # oob indiciates if we are out of bounds # of positional indexing for index, oob in [(tm.makeIntIndex(5), False), (tm.makeRangeIndex(5), False), (tm.makeIntIndex(5) + 10, True)]: # s is an in-range index s = Series(range(5), index=index) # getitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: for idxr in [lambda x: x.loc, lambda x: x.ix]: result = idxr(s)[l] # these are all label indexing # except getitem which is positional # empty if oob: 
indexer = slice(0, 0) else: indexer = slice(3, 5) self.check(result, s, indexer, False) # positional indexing def f(): s[l] self.assertRaises(TypeError, f) # getitem out-of-bounds for l in [slice(-6, 6), slice(-6.0, 6.0)]: for idxr in [lambda x: x.loc, lambda x: x.ix]: result = idxr(s)[l] # these are all label indexing # except getitem which is positional # empty if oob: indexer = slice(0, 0) else: indexer = slice(-6, 6) self.check(result, s, indexer, False) # positional indexing def f(): s[slice(-6.0, 6.0)] self.assertRaises(TypeError, f) # getitem odd floats for l, res1 in [(slice(2.5, 4), slice(3, 5)), (slice(2, 3.5), slice(2, 4)), (slice(2.5, 3.5), slice(3, 4))]: for idxr in [lambda x: x.loc, lambda x: x.ix]: result = idxr(s)[l] if oob: res = slice(0, 0) else: res = res1 self.check(result, s, res, False) # positional indexing def f(): s[l] self.assertRaises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: for idxr in [lambda x: x.loc, lambda x: x.ix]: sc = s.copy() idxr(sc)[l] = 0 result = idxr(sc)[l].values.ravel() self.assertTrue((result == 0).all()) # positional indexing def f(): s[l] = 0 self.assertRaises(TypeError, f) def test_integer_positional_indexing(self): """ make sure that we are raising on positional indexing w.r.t. an integer index """ s = Series(range(2, 6), index=range(2, 6)) result = s[2:4] expected = s.iloc[2:4] assert_series_equal(result, expected) for idxr in [lambda x: x, lambda x: x.iloc]: for l in [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]: def f(): idxr(s)[l] self.assertRaises(TypeError, f) def test_slice_integer_frame_getitem(self): # similar to above, but on the getitem dim (of a DataFrame) for index in [tm.makeIntIndex, tm.makeRangeIndex]: index = index(5) s = DataFrame(np.random.randn(5, 2), index=index) for idxr in [lambda x: x.loc, lambda x: x.ix]: # getitem for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]: result = idxr(s)[l] indexer = slice(0, 2) self.check(result, s, indexer, False) # positional indexing def f(): s[l] self.assertRaises(TypeError, f) # getitem out-of-bounds for l in [slice(-10, 10), slice(-10.0, 10.0)]: result = idxr(s)[l] self.check(result, s, slice(-10, 10), True) # positional indexing def f(): s[slice(-10.0, 10.0)] self.assertRaises(TypeError, f) # getitem odd floats for l, res in [(slice(0.5, 1), slice(1, 2)), (slice(0, 0.5), slice(0, 1)), (slice(0.5, 1.5), slice(1, 2))]: result = idxr(s)[l] self.check(result, s, res, False) # positional indexing def f(): s[l] self.assertRaises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: sc = s.copy() idxr(sc)[l] = 0 result = idxr(sc)[l].values.ravel() self.assertTrue((result == 0).all()) # positional indexing def f(): s[l] = 0 self.assertRaises(TypeError, f) def test_slice_float(self): # same as above, but for floats index = Index(np.arange(5.)) + 0.1 for s in [Series(range(5), index=index), DataFrame(np.random.randn(5, 2), index=index)]: for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: expected = s.iloc[3:4] for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x]: # getitem result = idxr(s)[l] self.assertTrue(result.equals(expected)) # setitem s2 = s.copy() idxr(s2)[l] = 0 result = idxr(s2)[l].values.ravel() self.assertTrue((result == 0).all()) def test_floating_index_doc_example(self): index = Index([1.5, 2, 3, 4.5, 5]) s = Series(range(5), index=index) self.assertEqual(s[3], 2) self.assertEqual(s.ix[3], 2) self.assertEqual(s.loc[3], 2) self.assertEqual(s.iloc[3], 3) def test_floating_misc(self): # 
related 236 # scalar/slicing of a float index s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64) # label based slicing result1 = s[1.0:3.0] result2 = s.ix[1.0:3.0] result3 = s.loc[1.0:3.0] assert_series_equal(result1, result2) assert_series_equal(result1, result3) # exact indexing when found result1 = s[5.0] result2 = s.loc[5.0] result3 = s.ix[5.0] self.assertEqual(result1, result2) self.assertEqual(result1, result3) result1 = s[5] result2 = s.loc[5] result3 = s.ix[5] self.assertEqual(result1, result2) self.assertEqual(result1, result3) self.assertEqual(s[5.0], s[5]) # value not found (and no fallbacking at all) # scalar integers self.assertRaises(KeyError, lambda: s.loc[4]) self.assertRaises(KeyError, lambda: s.ix[4]) self.assertRaises(KeyError, lambda: s[4]) # fancy floats/integers create the correct entry (as nan) # fancy tests expected = Series([2, 0], index=Float64Index([5.0, 0.0])) for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float assert_series_equal(s[fancy_idx], expected) assert_series_equal(s.loc[fancy_idx], expected) assert_series_equal(s.ix[fancy_idx], expected) expected = Series([2, 0], index=Index([5, 0], dtype='int64')) for fancy_idx in [[5, 0], np.array([5, 0])]: # int assert_series_equal(s[fancy_idx], expected) assert_series_equal(s.loc[fancy_idx], expected) assert_series_equal(s.ix[fancy_idx], expected) # all should return the same as we are slicing 'the same' result1 = s.loc[2:5] result2 = s.loc[2.0:5.0] result3 = s.loc[2.0:5] result4 = s.loc[2.1:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) # previously this did fallback indexing result1 = s[2:5] result2 = s[2.0:5.0] result3 = s[2.0:5] result4 = s[2.1:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) result1 = s.ix[2:5] result2 = s.ix[2.0:5.0] result3 = s.ix[2.0:5] result4 = s.ix[2.1:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) # combined test result1 = s.loc[2:5] result2 = s.ix[2:5] result3 = s[2:5] assert_series_equal(result1, result2) assert_series_equal(result1, result3) # list selection result1 = s[[0.0, 5, 10]] result2 = s.loc[[0.0, 5, 10]] result3 = s.ix[[0.0, 5, 10]] result4 = s.iloc[[0, 2, 4]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, result4) result1 = s[[1.6, 5, 10]] result2 = s.loc[[1.6, 5, 10]] result3 = s.ix[[1.6, 5, 10]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, Series( [np.nan, 2, 4], index=[1.6, 5, 10])) result1 = s[[0, 1, 2]] result2 = s.ix[[0, 1, 2]] result3 = s.loc[[0, 1, 2]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1, Series( [0.0, np.nan, np.nan], index=[0, 1, 2])) result1 = s.loc[[2.5, 5]] result2 = s.ix[[2.5, 5]] assert_series_equal(result1, result2) assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0])) result1 = s[[2.5]] result2 = s.ix[[2.5]] result3 = s.loc[[2.5]] assert_series_equal(result1, result2) assert_series_equal(result1, result3) assert_series_equal(result1,
Series([1], index=[2.5]))
pandas.Series
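# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a minimal,
# self-contained example of the two float-indexer rules the preceding tests
# assert, assuming a pandas version with the behaviour pinned down there.
# The names `np`, `pd` and `s` below are introduced only for this example.
import numpy as np
import pandas as pd

s = pd.Series(np.arange(5))   # default integer (range) index 0..4

# label-based access coerces an integral float key to the integer label 3
assert s.loc[3.0] == 3

# positional access (.iloc) refuses any float key
try:
    s.iloc[3.0]
except TypeError:
    pass
# ---------------------------------------------------------------------------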
#!/usr/bin/env python # -*- coding:utf-8 -*- # @Time : 2019/5/5 15:12 # @Author: <NAME> # @File : Tech191.py import os import sys import re import multiprocessing import numpy as np import pandas as pd from numpy import abs from numpy import log from numpy import sign import scipy as sp import tushare as ts from pyfinance.ols import OLS, PandasRollingOLS CurrentPath = os.path.abspath(os.path.dirname(__file__)) # 设置绝对路径 Pre_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) sys.path += [CurrentPath, Pre_path, Pre_path + '\\Engine'] FactorPath = Pre_path + '\\FactorData\\' from TechFunc import * # noqa from LoggingPlus import Logger # noqa class Alpha191(object): ''' 国泰君安191个短周期技术因子 ''' def __init__(self, startdate, enddate, count, length): ''' 获取数据信息 :param df_data: ''' stock_price = get_price(startdate=startdate, enddate=enddate, fields=[ 'open_qfq', 'close_qfq', 'low_qfq', 'high_qfq', 'volume', 'amount', 'ret' ], count=count) benchmark_price = get_price(startdate=startdate, enddate=enddate, fields=['index'], count=count) self.open = stock_price['open_qfq'] self.close = stock_price['close_qfq'] self.low = stock_price['low_qfq'] self.high = stock_price['high_qfq'] self.volume = stock_price['volume'] * 100 self.amount = stock_price['amount'] * 1000 self.returns = stock_price['ret'] self.benchmark_open = benchmark_price['index']['沪深300open'] self.benchmark_close = benchmark_price['index']['沪深300close'] self.vwap = stock_price['amount'] / (stock_price['volume'] + 0.001) self.pre_close = self.close.shift(1).fillna(0) self.length = length np.seterr(divide='ignore', invalid='ignore') # 忽略警告 ''' 1.dataframe与dataframe,0,1比较用np.maximum和np.minimum 与其他数据比较用ts_max和ts_min,属于时间序列 2.公式中的幂次方计算一律用pow()函数 3.benchmark_open与benchmark_close为series 4.逻辑运算符一律用&, |表示 ''' # (-1 * CORR(RANK(DELTA(LOG(VOLUME), 1)), RANK(((CLOSE - OPEN) / OPEN)), 6)) def tech001(self): tech001 = (-1*correlation(rank(delta(log(self.volume), 1)), rank(((self.close-self.open)/self.open)), 6)) save_hdf(tech001, 'tech001', self.length) return # (-1 * DELTA((((CLOSE - LOW) - (HIGH - CLOSE)) / (HIGH - LOW)), 1)) def tech002(self): tech002 = (-1*delta((((self.close-self.low) - (self.high-self.close))/(self.high-self.low)), 1)) save_hdf(tech002, 'tech002', self.length) return # SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,DELAY(CLOSE,1)))),6) def tech003(self): part1 = self.close part1[self.close == delay(self.close, 1)] = 0 part2 = np.maximum(self.high, delay(self.close, 1)) part2[self.close > delay(self.close, 1)] = np.minimum( self.low, delay(self.close, 1)) tech003 = pd.DataFrame( ts_sum(part1-part2, 6), index=self.close.index, columns=self.close.columns) save_hdf(tech003, 'tech003', self.length) return # ((((SUM(CLOSE, 8) / 8) + STD(CLOSE, 8)) < (SUM(CLOSE, 2) / 2)) ? (-1 * 1) : (((SUM(CLOSE, 2) / 2) < # ((SUM(CLOSE, 8) / 8) - STD(CLOSE, 8))) ? 1 : (((1 < (VOLUME / MEAN(VOLUME,20))) || ((VOLUME / # MEAN(VOLUME,20)) == 1)) ? 
1 : (-1 * 1)))) def tech004(self): cond1 = ((ts_sum(self.close, 8) / 8 + stddev(self.close, 8)) < ts_sum(self.close, 2) / 2) cond2 = (ts_sum(self.close, 2) / 2 < (ts_sum(self.close, 8) / 8 - stddev(self.close, 8))) cond3 = (1 <= self.volume / mean(self.volume, 20)) tech004 = -1 * pd.DataFrame(np.ones(self.close.shape), index=self.close.index, columns=self.close.columns) tech004[cond1] = -1 tech004[cond2] = 1 tech004[cond3] = 1 save_hdf(tech004, 'tech004', self.length) return # (-1 * TSMAX(CORR(TSRANK(VOLUME, 5), TSRANK(HIGH, 5), 5), 3)) def tech005(self): tech005 = (-1*ts_max(correlation(ts_rank(self.volume, 5), ts_rank(self.high, 5), 5), 5)) save_hdf(tech005, 'tech005', self.length) return # (RANK(SIGN(DELTA((((OPEN * 0.85) + (HIGH * 0.15))), 4)))* -1) def tech006(self): tech006 = (rank(sign(delta((self.open*0.85+self.high*0.15), 4)))*(-1)) save_hdf(tech006, 'tech006', self.length) return # ((RANK(MAX((VWAP - CLOSE), 3)) + RANK(MIN((VWAP - CLOSE), 3))) * RANK(DELTA(VOLUME, 3))) def tech007(self): tech007 = ((rank(ts_max((self.vwap-self.close), 3)) + rank(ts_min((self.vwap-self.close), 3))) * rank(delta(self.volume, 3))) save_hdf(tech007, 'tech007', self.length) return # RANK(DELTA(((((HIGH + LOW) / 2) * 0.2) + (VWAP * 0.8)), 4) * -1) def tech008(self): tech008 = rank( delta(((((self.high+self.low)/2)*0.2)+(self.vwap*0.8)), 4)*(-1)) save_hdf(tech008, 'tech008', self.length) return # SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,7,2) def tech009(self): tech009 = sma(((self.high+self.low)/2-(delay(self.high, 1) + delay(self.low, 1))/2)*(self.high-self.low)/self.volume, 7, 2) save_hdf(tech009, 'tech009', self.length) return # (RANK(MAX(((RET < 0) ? STD(RET, 20) : CLOSE)^2),5)) def tech010(self): part1 = self.returns part1[self.returns < 0] = stddev(self.returns, 20) tech010 = rank(ts_max(pow(part1, 2), 5)) save_hdf(tech010, 'tech010', self.length) return # SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,6) def tech011(self): tech011 = ts_sum(((self.close-self.low)-(self.high - self.close))/(self.high-self.low)*self.volume, 6) save_hdf(tech011, 'tech011', self.length) return # (RANK((OPEN - (SUM(VWAP, 10) / 10)))) * (-1 * (RANK(ABS((CLOSE - VWAP))))) def tech012(self): tech012 = (rank((self.open - (ts_sum(self.vwap, 10) / 10))) ) * (-1 * (rank(abs((self.close - self.vwap))))) save_hdf(tech012, 'tech012', self.length) return # (((HIGH * LOW)^0.5) - VWAP) def tech013(self): tech013 = (pow((self.high * self.low), 0.5) - self.vwap) save_hdf(tech013, 'tech013', self.length) return # CLOSE-DELAY(CLOSE,5) def tech014(self): tech014 = self.close-delay(self.close, 5) save_hdf(tech014, 'tech014', self.length) return # OPEN/DELAY(CLOSE,1)-1 def tech015(self): tech015 = self.open/delay(self.close, 1)-1 save_hdf(tech015, 'tech015', self.length) return # (-1 * TSMAX(RANK(CORR(RANK(VOLUME), RANK(VWAP), 5)), 5)) def tech016(self): tech016 = (-1*ts_max(rank(correlation(rank(self.volume), rank(self.vwap), 5)), 5)) save_hdf(tech016, 'tech016', self.length) return # RANK((VWAP - MAX(VWAP, 15)))^DELTA(CLOSE, 5) def tech017(self): tech017 = pow(rank((self.vwap - ts_max(self.vwap, 15))), delta(self.close, 5)) save_hdf(tech017, 'tech017', self.length) return # CLOSE/DELAY(CLOSE,5) def tech018(self): tech018 = self.close/delay(self.close, 5) save_hdf(tech018, 'tech018', self.length) return # (CLOSE<DELAY(CLOSE,5)?(CLOSE-DELAY(CLOSE,5))/DELAY(CLOSE,5):(CLOSE=DELAY(CLOSE,5)?0:(CLOSE-DELAY(CLOSE,5))/CLOSE)) def tech019(self): cond1 = (self.close < delay(self.close, 5)) cond2 = (self.close == 
delay(self.close, 5)) tech019 = (self.close-delay(self.close, 5))/self.close tech019[cond1] = (self.close-delay(self.close, 5))/delay(self.close, 5) tech019[cond2] = 0 save_hdf(tech019, 'tech019', self.length) return # (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100 def tech020(self): tech020 = (self.close-delay(self.close, 6))/delay(self.close, 6)*100 save_hdf(tech020, 'tech020', self.length) return # REGBETA(MEAN(CLOSE,6),SEQUENCE(6)) def tech021(self): tech021 = regbeta(mean(self.close, 6), sequence(6), 6) save_hdf(tech021, 'tech021', self.length) return # SMA(((CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6)-DELAY((CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6),3)),12,1) def tech022(self): tech022 = sma(((self.close-mean(self.close, 6))/mean(self.close, 6) - delay((self.close-mean(self.close, 6))/mean(self.close, 6), 3)), 12, 1) save_hdf(tech022, 'tech022', self.length) return # SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1)/(SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1 )+SMA((CLOSE<=DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1))*100 def tech023(self): cond1 = (self.close > delay(self.close, 1)) cond2 = (self.close <= delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = stddev(self.close, 20) part2 = sma(part1, 20, 1) part3 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part3[cond2] = stddev(self.close, 20) part4 = sma(part3, 20, 1) tech023 = part2/(part2+part4) save_hdf(tech023, 'tech023', self.length) return # SMA(CLOSE-DELAY(CLOSE,5),5,1) def tech024(self): tech024 = sma(self.close-delay(self.close, 5), 5, 1) save_hdf(tech024, 'tech024', self.length) return # ((-1 * RANK((DELTA(CLOSE, 7) * (1 - RANK(DECAYLINEAR((VOLUME / MEAN(VOLUME,20)), 9)))))) * (1 + RANK(SUM(RET, 250)))) def tech025(self): tech025 = ((-1 * rank((delta(self.close, 7) * (1 - rank(decay_linear((self.volume / mean(self.volume, 20)), 9)))))) * (1 + rank(ts_sum(self.returns, 250)))) save_hdf(tech025, 'tech025', self.length) return # ((((SUM(CLOSE, 7) / 7) - CLOSE)) + ((CORR(VWAP, DELAY(CLOSE, 5), 230)))) def tech026(self): tech026 = ((((ts_sum(self.close, 7) / 7) - self.close)) + ((correlation(self.vwap, delay(self.close, 5), 230)))) save_hdf(tech026, 'tech026', self.length) return # WMA((CLOSE-DELAY(CLOSE,3))/DELAY(CLOSE,3)*100+(CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*100,12) def tech027(self): tech027 = wma((self.close-delay(self.close, 3))/delay(self.close, 3) * 100+(self.close-delay(self.close, 6))/delay(self.close, 6)*100, 12) save_hdf(tech027, 'tech027', self.length) return # 3*SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1)-2*SMA(SMA((CLOSE-TSMIN(LOW,9))/( MAX(HIGH,9)-TSMAX(LOW,9))*100,3,1),3,1) def tech028(self): tech028 = 3*sma((self.close-ts_min(self.low, 9))/(ts_max(self.high, 9)-ts_min(self.low, 9))*100, 3, 1) - \ 2*sma(sma((self.close-ts_min(self.low, 9)) / (ts_max(self.high, 9)-ts_max(self.low, 9))*100, 3, 1), 3, 1) save_hdf(tech028, 'tech028', self.length) return # (CLOSE-DELAY(CLOSE,6))/DELAY(CLOSE,6)*VOLUME def tech029(self): tech029 = (self.close-delay(self.close, 6)) / \ delay(self.close, 6)*self.volume save_hdf(tech029, 'tech029', self.length) return # # WMA((REGRESI(CLOSE/DELAY(CLOSE)-1,MKT,SMB,HML,60))^2,20) # def tech030(self): # return 0 # (CLOSE-MEAN(CLOSE,12))/MEAN(CLOSE,12)*100 def tech031(self): tech031 = (self.close-mean(self.close, 12))/mean(self.close, 12)*100 save_hdf(tech031, 'tech031', self.length) return # (-1 * SUM(RANK(CORR(RANK(HIGH), RANK(VOLUME), 3)), 3)) def 
tech032(self): tech032 = (-1*ts_sum(rank(correlation(rank(self.high), rank(self.volume), 3)), 3)) save_hdf(tech032, 'tech032', self.length) return # ((((-1 * TSMIN(LOW, 5)) + DELAY(TSMIN(LOW, 5), 5)) * RANK(((SUM(RET, 240) - SUM(RET, 20)) / 220))) * TSRANK(VOLUME, 5)) def tech033(self): tech033 = ((((-1*ts_min(self.low, 5))+delay(ts_min(self.low, 5), 5))*rank( ((ts_sum(self.returns, 240)-ts_sum(self.returns, 20))/220)))*ts_rank(self.volume, 5)) save_hdf(tech033, 'tech033', self.length) return # MEAN(CLOSE,12)/CLOSE def tech034(self): tech034 = mean(self.close, 12)/self.close save_hdf(tech034, 'tech034', self.length) return # (MIN(RANK(DECAYLINEAR(DELTA(OPEN, 1), 15)), RANK(DECAYLINEAR(CORR((VOLUME), ((OPEN * 0.65) + (OPEN *0.35)), 17),7))) * -1) def tech035(self): part1 = (np.minimum(rank(decay_linear(delta(self.open, 1), 15)), rank(decay_linear( correlation((self.volume), ((self.open * 0.65) + (self.open * 0.35)), 17), 7))) * -1) tech035 = pd.DataFrame( part1, index=self.close.index, columns=self.close.columns) save_hdf(tech035, 'tech035', self.length) return # RANK(SUM(CORR(RANK(VOLUME), RANK(VWAP))6, 2)) def tech036(self): tech036 = rank( ts_sum(correlation(rank(self.volume), rank(self.vwap), 6), 2)) save_hdf(tech036, 'tech036', self.length) return # (-1 * RANK(((SUM(OPEN, 5) * SUM(RET, 5)) - DELAY((SUM(OPEN, 5) * SUM(RET, 5)), 10)))) def tech037(self): tech037 = (-1*rank(((ts_sum(self.open, 5)*ts_sum(self.returns, 5)) - delay((ts_sum(self.open, 5)*ts_sum(self.returns, 5)), 10)))) save_hdf(tech037, 'tech037', self.length) return # (((SUM(HIGH, 20) / 20) < HIGH) ? (-1 * DELTA(HIGH, 2)) : 0) def tech038(self): cond1 = ((ts_sum(self.high, 20)/20) < self.high) tech038 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) tech038[cond1] = (-1*delta(self.high, 2)) save_hdf(tech038, 'tech038', self.length) return # ((RANK(DECAYLINEAR(DELTA((CLOSE), 2),8)) - RANK(DECAYLINEAR(CORR(((VWAP * 0.3) + (OPEN * 0.7)), SUM(MEAN(VOLUME,180), 37), 14), 12))) * -1) def tech039(self): tech039 = ((rank(decay_linear(delta((self.close), 2), 8)) - rank(decay_linear(correlation( ((self.vwap * 0.3) + (self.open * 0.7)), ts_sum(mean(self.vwap, 180), 37), 14), 12))) * -1) save_hdf(tech039, 'tech039', self.length) return # SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:0),26)/SUM((CLOSE<=DELAY(CLOSE,1)?VOLUME:0),26)*100 def tech040(self): cond1 = (self.close > delay(self.close, 1)) cond2 = (self.close <= delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = self.volume part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = self.volume tech040 = ts_sum(part1, 26)/ts_sum(part2, 26)*100 save_hdf(tech040, 'tech040', self.length) return # (RANK(MAX(DELTA((VWAP), 3), 5))* -1) def tech041(self): tech041 = (rank(ts_max(delta((self.vwap), 3), 5)) * -1) save_hdf(tech041, 'tech041', self.length) return # ((-1 * RANK(STD(HIGH, 10))) * CORR(HIGH, VOLUME, 10)) def tech042(self): tech042 = ((-1 * rank(stddev(self.high, 10))) * correlation(self.high, self.volume, 10)) save_hdf(tech042, 'tech042', self.length) return # SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),6) def tech043(self): cond1 = (self.close > delay(self.close, 1)) cond2 = (self.close < delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = self.volume part1[cond2] = -self.volume tech043 = 
ts_sum(part1, 6) save_hdf(tech043, 'tech043', self.length) return # (TSRANK(DECAYLINEAR(CORR(((LOW )), MEAN(VOLUME,10), 7), 6),4) + TSRANK(DECAYLINEAR(DELTA((VWAP), 3), 10), 15)) def tech044(self): tech044 = (ts_rank(decay_linear(correlation(((self.low)), mean( self.volume, 10), 7), 6), 4) + ts_rank(decay_linear(delta((self.vwap), 3), 10), 15)) save_hdf(tech044, 'tech044', self.length) return # (RANK(DELTA((((CLOSE * 0.6) + (OPEN *0.4))), 1)) * RANK(CORR(VWAP, MEAN(VOLUME,150), 15))) def tech045(self): tech045 = (rank(delta((((self.close * 0.6) + (self.open * 0.4))), 1)) * rank(correlation(self.vwap, mean(self.volume, 150), 15))) save_hdf(tech045, 'tech045', self.length) return # (MEAN(CLOSE,3)+MEAN(CLOSE,6)+MEAN(CLOSE,12)+MEAN(CLOSE,24))/(4*CLOSE) def tech046(self): tech046 = (mean(self.close, 3)+mean(self.close, 6) + mean(self.close, 12)+mean(self.close, 24))/(4*self.close) save_hdf(tech046, 'tech046', self.length) return # SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,9,1) def tech047(self): tech047 = sma((ts_max(self.high, 6) - self.close) / (ts_max(self.high, 6) - ts_min(self.low, 6)) * 100, 9, 1) save_hdf(tech047, 'tech047', self.length) return # (-1*((RANK(((SIGN((CLOSE - DELAY(CLOSE, 1))) + SIGN((DELAY(CLOSE, 1) - DELAY(CLOSE, 2)))) # + SIGN((DELAY(CLOSE, 2) - DELAY(CLOSE, 3)))))) * SUM(VOLUME, 5)) / SUM(VOLUME, 20)) def tech048(self): tech048 = (-1*((ts_rank(((sign((self.close - delay(self.close, 1))) + sign((delay(self.close, 1) - delay(self.close, 2)))) + sign((delay(self.close, 2) - delay(self.close, 3)))))) * ts_sum(self.volume, 5)) / ts_sum(self.volume, 20)) save_hdf(tech048, 'tech048', self.length) return # SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)), # ABS(LOW-DELAY(L OW,1)))),12)/(SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1)) # ?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(L OW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW) # <=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HI GH,1)),ABS(LOW-DELAY(LOW,1)))),12)) def tech049(self): cond1 = (self.high + self.low) <= (delay(self.high, 1) + delay(self.low, 1)) cond2 = (self.high + self.low) >= (delay(self.high, 1) + delay(self.low, 1)) part1 = np.maximum(abs(self.high - delay(self.high, 1)), abs(self.low - delay(self.low, 1))) part1[cond1] = 0 part2 = np.maximum(abs(self.high - delay(self.high, 1)), abs(self.low - delay(self.low, 1))) part2[cond2] = 0 tech049 = ts_sum(part2, 12) / (ts_sum(part2, 12) + ts_sum(part1, 12)) save_hdf(tech049, 'tech049', self.length) return # SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)), # ABS(LOW-DELAY(L OW,1)))),12)/(SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1) # )?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(L OW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW) # >=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HI GH,1)),ABS(LOW-DELAY(LOW,1)))) # ,12))-SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HI GH-DELAY(HIGH,1)), # ABS(LOW-DELAY(LOW,1)))),12)/(SUM(((HIGH+LOW)>=(DELAY(HIGH,1)+DELAY(LOW,1))?0: # MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW)<= # (DELAY(HIGH,1)+DELA Y(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)) def tech050(self): cond1 = (self.high + self.low) <= (delay(self.high, 1) + delay(self.low, 1)) cond2 = (self.high + self.low) >= (delay(self.high, 1) + delay(self.low, 1)) part1 = np.maximum(abs(self.high - delay(self.high, 1)), abs(self.low - delay(self.low, 1))) part1[cond1] = 0 part2 = np.maximum(abs(self.high - delay(self.high, 1)), abs(self.low - delay(self.low, 1))) part2[cond2] = 0 
tech050 = (ts_sum(part1, 12) - ts_sum(part2, 12)) / \ (ts_sum(part1, 12) + ts_sum(part2, 12)) save_hdf(tech050, 'tech050', self.length) return # SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)), # ABS(LOW-DELAY(L OW,1)))),12)/(SUM(((HIGH+LOW)<=(DELAY(HIGH,1)+DELAY(LOW,1)) # ?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)+SUM(((HIGH+LOW) # >=(DELAY(HIGH,1)+DELAY(LOW,1))?0:MAX(ABS(HIGH-DELAY(HIGH,1)),ABS(LOW-DELAY(LOW,1)))),12)) def tech051(self): cond1 = (self.high+self.low) <= (delay(self.high, 1)+delay(self.low, 1)) cond2 = (self.high+self.low) >= (delay(self.high, 1)+delay(self.low, 1)) part1 = np.maximum(abs(self.high-delay(self.high, 1)), abs(self.low-delay(self.low, 1))) part1[cond1] = 0 part2 = np.maximum(abs(self.high-delay(self.high, 1)), abs(self.low-delay(self.low, 1))) part2[cond2] = 0 tech051 = ts_sum(part1, 12)/(ts_sum(part1, 12)+ts_sum(part2, 12)) save_hdf(tech051, 'tech051', self.length) return # SUM(MAX(0,HIGH-DELAY((HIGH+LOW+CLOSE)/3,1)),26)/SUM(MAX(0,DELAY((HIGH+LOW+CLOSE)/3-LOW,1)),26)* 100 def tech052(self): tech052 = ts_sum(np.maximum(0, self.high-delay((self.high+self.low+self.close)/3, 1)), 26) / \ ts_sum(np.maximum( 0, delay((self.high+self.low+self.close)/3-self.low, 1)), 26)*100 save_hdf(tech052, 'tech052', self.length) return # COUNT(CLOSE>DELAY(CLOSE,1),12)/12*100 def tech053(self): cond1 = (self.close > delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = 1 tech053 = count(part1, 12) / 12 * 100 save_hdf(tech053, 'tech053', self.length) return # (-1 * RANK((STD(ABS(CLOSE - OPEN)) + (CLOSE - OPEN)) + CORR(CLOSE, OPEN,10))) def tech054(self): tech054 = (-1*rank((stddev(abs(self.close-self.open)) + (self.close-self.open))+correlation(self.close, self.open, 10))) save_hdf(tech054, 'tech054', self.length) return # SUM(16*(CLOSE-DELAY(CLOSE,1)+(CLOSE-OPEN)/2+DELAY(CLOSE,1)-DELAY(OPEN,1))/ # ((ABS(HIGH-DELAY(CL OSE,1))>ABS(LOW-DELAY(CLOSE,1)) & ABS(HIGH-DELAY(CLOSE,1)) # >ABS(HIGH-DELAY(LOW,1))?ABS(HIGH-DELAY(CLOSE,1))+ABS(LOW-DELAY(CLOS E,1))/2 # +ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:(ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1)) # & ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(CLOSE,1))?ABS(LOW-DELAY(CLOSE,1))+ # ABS(HIGH-DELAY(CLO SE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:ABS(HIGH-DELAY(LOW,1)) # +ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4)))*MAX(ABS(HIGH-DELAY(CLOSE,1)),ABS(LOW-DELAY(CLOSE,1))),20) def tech055(self): cond1 = (abs(self.high - delay(self.close, 1)) > abs(self.low - delay(self.close, 1)) ) & (abs(self.high - delay(self.close, 1)) > abs(self.high - delay(self.low, 1))) cond2 = (abs(self.low - delay(self.close, 1)) > abs(self.high - delay(self.low, 1)) ) & (abs(self.low - delay(self.close, 1)) > abs(self.high - delay(self.close, 1))) part1 = abs(self.high - delay(self.low, 1)) + \ abs(delay(self.close, 1) - delay(self.open, 1)) / 4 part1[cond1] = abs(self.high - delay(self.close, 1)) + abs(self.low - delay( self.close, 1)) / 2 + abs(delay(self.close, 1) - delay(self.open, 1)) / 4 part1[cond2] = abs(self.low - delay(self.close, 1)) + abs(self.high - delay( self.close, 1)) / 2 + abs(delay(self.close, 1) - delay(self.open, 1)) / 4 tech055 = ts_sum(16 * (self.close - delay(self.close, 1) + (self.close - self.open) / 2 + delay(self.close, 1) - delay( self.open, 1)) / part1 * np.maximum(abs(self.high - delay(self.close, 1)), abs(self.low - delay(self.close, 1))), 20) save_hdf(tech055, 'tech055', self.length) return # (RANK((OPEN - TSMIN(OPEN, 12))) 
< RANK((RANK(CORR(SUM(((HIGH + LOW) / 2), 19),SUM(MEAN(VOLUME,40), 19), 13))^5))) def tech056(self): tech056 = ((rank((self.open - ts_min(self.open, 12))) < rank(pow(rank(correlation(ts_sum(((self.high + self.low) / 2), 19), ts_sum(mean(self.volume, 40), 19), 13)), 5)))*1) save_hdf(tech056, 'tech056', self.length) return # SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1) def tech057(self): tech057 = sma((self.close-ts_min(self.low, 9)) / (ts_max(self.high, 9))*100, 3, 1) save_hdf(tech057, 'tech057', self.length) return # COUNT(CLOSE>DELAY(CLOSE,1),20)/20*100 def tech058(self): cond1 = (self.close > delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = 1 tech058 = count(part1, 20)/20*100 save_hdf(tech058, 'tech058', self.length) return # SUM((CLOSE=DELAY(CLOSE,1)?0:CLOSE-(CLOSE>DELAY(CLOSE,1)?MIN(LOW,DELAY(CLOSE,1)):MAX(HIGH,D ELAY(CLOSE,1)))),20) def tech059(self): cond1 = (self.close == delay(self.close, 1)) cond2 = (self.close > delay(self.close, 1)) part1 = self.close part1[cond1] = 0 part2 = np.maximum(self.high, delay(self.close, 1)) part2[cond2] = np.minimum(self.low, delay(self.close, 1)) tech059 = ts_sum(part1-part2, 20) save_hdf(tech059, 'tech059', self.length) return # SUM(((CLOSE-LOW)-(HIGH-CLOSE))./(HIGH-LOW).*VOLUME,20) def tech060(self): tech060 = ts_sum(((self.close-self.low)-(self.high - self.close))/(self.high-self.low)*self.volume, 20) save_hdf(tech060, 'tech060', self.length) return # (MAX(RANK(DECAYLINEAR(DELTA(VWAP, 1), 12)),RANK(DECAYLINEAR(RANK(CORR(LOW,MEAN(VOLUME,80), 8)), 17))) * -1) def tech061(self): tech061 = (np.maximum(rank(decay_linear(delta(self.vwap, 1), 12)), rank( decay_linear(rank(correlation(self.low, mean(self.volume, 80), 8)), 17))) * -1) save_hdf(tech061, 'tech061', self.length) return # (-1 * CORR(HIGH, RANK(VOLUME), 5)) def tech062(self): tech062 = (-1*correlation(self.high, rank(self.volume), 5)) save_hdf(tech062, 'tech062', self.length) return # SMA(MAX(CLOSE-DELAY(CLOSE,1),0),6,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),6,1)*100 def tech063(self): tech063 = sma(np.maximum(self.close-delay(self.close, 1), 0), 6, 1)/sma(abs(self.close-delay(self.close, 1)), 6, 1)*100 save_hdf(tech063, 'tech063', self.length) return # (MAX(RANK(DECAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), 4), 4)),RANK(DECAYLINEAR(MAX(CORR(RANK(CLOSE), RANK(MEAN(VOLUME,64)), 4), 13), 14))) * -1) def tech064(self): tech064 = (np.maximum(rank(decay_linear(correlation(rank(self.vwap), rank(self.volume), 4), 4)), rank( decay_linear(ts_max(correlation(rank(self.close), rank(mean(self.volume, 64)), 4), 13), 14))) * -1) save_hdf(tech064, 'tech064', self.length) return # MEAN(CLOSE,6)/CLOSE def tech065(self): tech065 = mean(self.close, 6)/self.close save_hdf(tech065, 'tech065', self.length) return # (CLOSE-MEAN(CLOSE,6))/MEAN(CLOSE,6)*100 def tech066(self): tech066 = (self.close-mean(self.close, 6))/mean(self.close, 6)*100 save_hdf(tech066, 'tech066', self.length) return ################################################################## def tech067(self): tech067 = sma(np.maximum(self.close-delay(self.close, 1), 0), 24, 1)/sma(abs(self.close-delay(self.close, 1)), 24, 1)*100 save_hdf(tech067, 'tech067', self.length) return # SMA(((HIGH+LOW)/2-(DELAY(HIGH,1)+DELAY(LOW,1))/2)*(HIGH-LOW)/VOLUME,15,2) def tech068(self): tech068 = sma(((self.high+self.low)/2-(delay(self.high, 1) + delay(self.low, 1))/2)*(self.high-self.low)/self.volume, 15, 2) save_hdf(tech068, 'tech068', self.length) return # 
(SUM(DTM,20)>SUM(DBM,20)?(SUM(DTM,20)-SUM(DBM,20))/SUM(DTM,20):(SUM(DTM,20)=SUM(DBM,20)?0:(SUM(DTM,20)-SUM(DBM,20))/SUM(DBM,20))) def tech069(self): cond1 = (self.open <= delay(self.open, 1)) DTM = np.maximum((self.high-self.open), (self.open-delay(self.open, 1))) DTM[cond1] = 0 cond2 = (self.open >= delay(self.open, 1)) DBM = np.maximum((self.open-self.low), (self.open-delay(self.open, 1))) DBM[cond2] = 0 cond3 = ts_sum(DTM, 20) > ts_sum(DBM, 20) cond4 = ts_sum(DTM, 20) == ts_sum(DBM, 20) tech069 = (ts_sum(DTM, 20)-ts_sum(DBM, 20))/ts_sum(DBM, 20) tech069[cond3] = (ts_sum(DTM, 20)-ts_sum(DBM, 20))/ts_sum(DTM, 20) tech069[cond4] = 0 save_hdf(tech069, 'tech069', self.length) return # STD(AMOUNT, 6) def tech070(self): tech070 = stddev(self.amount, 6) save_hdf(tech070, 'tech070', self.length) return # (CLOSE-MEAN(CLOSE,24))/MEAN(CLOSE,24)*100 def tech071(self): tech071 = (self.close-mean(self.close, 24))/mean(self.close, 24)*100 save_hdf(tech071, 'tech071', self.length) return # SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,15,1) def tech072(self): tech072 = sma((ts_max(self.high, 6)-self.close) / (ts_max(self.high, 6)-ts_min(self.low, 6))*100, 15, 1) save_hdf(tech072, 'tech072', self.length) return # ((TSRANK(DECAYLINEAR(DECAYLINEAR(CORR((CLOSE), VOLUME, 10), 16), 4), 5) -RANK(DECAYLINEAR(CORR(VWAP, MEAN(VOLUME,30), 4),3))) * -1) def tech073(self): tech073 = ((ts_rank(decay_linear(decay_linear(correlation((self.close), self.volume, 10), 16), 4), 5) - rank(decay_linear(correlation(self.vwap, mean(self.volume, 30), 4), 3))) * -1) save_hdf(tech073, 'tech073', self.length) return # (RANK(CORR(SUM(((LOW * 0.35) + (VWAP * 0.65)), 20), SUM(MEAN(VOLUME,40), 20), 7)) + RANK(CORR(RANK(VWAP), RANK(VOLUME), 6))) def tech074(self): tech074 = (rank(correlation(ts_sum(((self.low * 0.35) + (self.vwap * 0.65)), 20), ts_sum( mean(self.volume, 40), 20), 7)) + rank(correlation(rank(self.vwap), rank(self.volume), 6))) save_hdf(tech074, 'tech074', self.length) return # COUNT(CLOSE>OPEN & BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN,50)/COUNT(BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN,50) def tech075(self): cond1 = (self.close > self.open) & (pd.DataFrame(np.tile((self.benchmark_close < self.benchmark_open), (self.close.shape[1], 1)), index=self.close.columns, columns=self.close.index).T) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = 1 cond2 = (self.benchmark_close < self.benchmark_open) part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = 1 tech075 = count(part1, 50)/count(part2, 50) save_hdf(tech075, 'tech075', self.length) return # STD(ABS((CLOSE/DELAY(CLOSE,1)-1))/VOLUME,20)/MEAN(ABS((CLOSE/DELAY(CLOSE,1)-1))/VOLUME,20) def tech076(self): tech076 = stddev(abs((self.close/delay(self.close, 1)-1))/self.volume, 20) / \ mean(abs((self.close/delay(self.close, 1)-1))/self.volume, 20) save_hdf(tech076, 'tech076', self.length) return # MIN(RANK(DECAYLINEAR(((((HIGH + LOW) / 2) + HIGH) - (VWAP + HIGH)), 20)), RANK(DECAYLINEAR(CORR(((HIGH + LOW) / 2), MEAN(VOLUME,40), 3), 6)) def tech077(self): tech077 = np.minimum(rank(decay_linear(((((self.high + self.low) / 2) + self.high) - (self.vwap + self.high)), 20)), rank(decay_linear(correlation(((self.high + self.low) / 2), mean(self.volume, 40), 3), 6))) save_hdf(tech077, 'tech077', self.length) return # ((HIGH+LOW+CLOSE)/3-MA((HIGH+LOW+CLOSE)/3,12))/(0.015*MEAN(ABS(CLOSE-MEAN((HIGH+LOW+CLOSE)/3,12)),12)) def tech078(self): tech078 = 
((self.high+self.low+self.close)/3-mean((self.high+self.low+self.close)/3, 12)) / \ (0.015*mean(abs(self.close-mean((self.high+self.low+self.close)/3, 12)), 12)) save_hdf(tech078, 'tech078', self.length) return # SMA(MAX(CLOSE-DELAY(CLOSE,1),0),12,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),12,1)*100 def tech079(self): tech079 = sma(np.maximum(self.close-delay(self.close, 1), 0), 12, 1)/sma(abs(self.close-delay(self.close, 1)), 12, 1)*100 save_hdf(tech079, 'tech079', self.length) return # (VOLUME-DELAY(VOLUME,5))/DELAY(VOLUME,5)*100 def tech080(self): tech080 = (self.volume-delay(self.volume, 5))/delay(self.volume, 5)*100 save_hdf(tech080, 'tech080', self.length) return # SMA(VOLUME,21,2) def tech081(self): tech081 = sma(self.volume, 21, 2) save_hdf(tech081, 'tech081', self.length) return # SMA((TSMAX(HIGH,6)-CLOSE)/(TSMAX(HIGH,6)-TSMIN(LOW,6))*100,20,1) def tech082(self): tech082 = sma((ts_max(self.high, 6)-self.close) / (ts_max(self.high, 6)-ts_min(self.low, 6))*100, 20, 1) save_hdf(tech082, 'tech082', self.length) return # (-1 * RANK(COVIANCE(RANK(HIGH), RANK(VOLUME), 5))) def tech083(self): tech083 = (-1 * rank(covariance(rank(self.high), rank(self.volume), 5))) save_hdf(tech083, 'tech083', self.length) return # SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),20) def tech084(self): cond1 = (self.close > delay(self.close, 1)) cond2 = (self.close < delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = self.volume part1[cond2] = -self.volume tech084 = ts_sum(part1, 20) save_hdf(tech084, 'tech084', self.length) return # (TSRANK((VOLUME / MEAN(VOLUME,20)), 20) * TSRANK((-1 * DELTA(CLOSE, 7)), 8)) def tech085(self): tech085 = (ts_rank((self.volume / mean(self.volume, 20)), 20) * ts_rank((-1 * delta(self.close, 7)), 8)) save_hdf(tech085, 'tech085', self.length) return # ((0.25 < (((DELAY(CLOSE, 20) - DELAY(CLOSE, 10)) / 10) - ((DELAY(CLOSE, 10) - CLOSE) / 10))) # ? (-1 * 1) :(((((DELAY(CLOSE, 20) - DELAY(CLOSE, 10)) / 10) - ((DELAY(CLOSE, 10) - CLOSE) / # 10)) < 0) ? 
1 :((-1 * 1) * (CLOSE - DELAY(CLOSE, 1))))) def tech086(self): cond1 = (0.25 < (((delay(self.close, 20) - delay(self.close, 10) ) / 10) - ((delay(self.close, 10) - self.close) / 10))) cond2 = ((((delay(self.close, 20) - delay(self.close, 10)) / 10) - ((delay(self.close, 10) - self.close) / 10)) < 0) tech086 = ((-1 * 1) * (self.close - delay(self.close, 1))) tech086[cond1] = -1 * 1 tech086[cond2] = 1 save_hdf(tech086, 'tech086', self.length) return # ((RANK(DECAYLINEAR(DELTA(VWAP, 4), 7)) + TSRANK(DECAYLINEAR(((((LOW * 0.9) + (LOW * 0.1)) - VWAP) / (OPEN - ((HIGH + LOW) / 2))), 11), 7)) * -1) def tech087(self): tech087 = ((rank(decay_linear(delta(self.vwap, 4), 7)) + ts_rank(decay_linear(((((self.low * 0.9) + (self.low * 0.1)) - self.vwap) / (self.open - ((self.high + self.low) / 2))), 11), 7)) * -1) save_hdf(tech087, 'tech087', self.length) return # (CLOSE-DELAY(CLOSE,20))/DELAY(CLOSE,20)*100 def tech088(self): tech088 = (self.close-delay(self.close, 20))/delay(self.close, 20)*100 save_hdf(tech088, 'tech088', self.length) return # 2*(SMA(CLOSE,13,2)-SMA(CLOSE,27,2)-SMA(SMA(CLOSE,13,2)-SMA(CLOSE,27,2),10,2)) def tech089(self): tech089 = 2*(sma(self.close, 13, 2)-sma(self.close, 27, 2) - sma(sma(self.close, 13, 2)-sma(self.close, 27, 2), 10, 2)) save_hdf(tech089, 'tech089', self.length) return # ( RANK(CORR(RANK(VWAP), RANK(VOLUME), 5)) * -1) def tech090(self): tech090 = (rank(correlation(rank(self.vwap), rank(self.volume), 5)) * -1) save_hdf(tech090, 'tech090', self.length) return # ((RANK((CLOSE - MAX(CLOSE, 5)))*RANK(CORR((MEAN(VOLUME,40)), LOW, 5))) * -1) def tech091(self): tech091 = ((rank((self.close - ts_max(self.close, 5))) * rank(correlation((mean(self.volume, 40)), self.low, 5))) * -1) save_hdf(tech091, 'tech091', self.length) return # (MAX(RANK(DECAYLINEAR(DELTA(((CLOSE*0.35)+(VWAP*0.65)),2),3)),TSRANK(DECAYLINEAR(ABS(CORR((MEAN(VOLUME,180)),CLOSE,13)),5),15))*-1) def tech092(self): tech092 = (np.maximum(rank(decay_linear(delta(((self.close*0.35)+(self.vwap*0.65)), 2), 3)), ts_rank(decay_linear(abs(correlation((mean(self.volume, 180)), self.close, 13)), 5), 15))*-1) save_hdf(tech092, 'tech092', self.length) return # SUM((OPEN>=DELAY(OPEN,1)?0:MAX((OPEN-LOW),(OPEN-DELAY(OPEN,1)))),20) def tech093(self): cond1 = (self.open >= delay(self.open, 1)) part1 = np.maximum((self.open-self.low), (self.open-delay(self.open, 1))) part1[cond1] = 0 tech093 = ts_sum(part1, 20) save_hdf(tech093, 'tech093', self.length) return # SUM((CLOSE>DELAY(CLOSE,1)?VOLUME:(CLOSE<DELAY(CLOSE,1)?-VOLUME:0)),30) def tech094(self): cond1 = (self.close > delay(self.close, 1)) cond2 = (self.close < delay(self.close, 1)) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = self.volume part1[cond2] = -self.volume tech094 = ts_sum(part1, 30) save_hdf(tech094, 'tech094', self.length) return # STD(AMOUNT,20) def tech095(self): tech095 = stddev(self.amount, 20) save_hdf(tech095, 'tech095', self.length) return # SMA(SMA((CLOSE-TSMIN(LOW,9))/(TSMAX(HIGH,9)-TSMIN(LOW,9))*100,3,1),3,1) def tech096(self): tech096 = sma(sma((self.close-ts_min(self.low, 9)) / (ts_max(self.high, 9)-ts_min(self.low, 9))*100, 3, 1), 3, 1) save_hdf(tech096, 'tech096', self.length) return # STD(VOLUME,10) def tech097(self): tech097 = stddev(self.volume, 10) save_hdf(tech097, 'tech097', self.length) return # ((((DELTA((SUM(CLOSE, 100) / 100), 100) / DELAY(CLOSE, 100)) < 0.05) || ((DELTA((SUM(CLOSE, 100) / 100), 100) / DELAY(CLOSE, 100)) == 0.05)) ? 
(-1 * (CLOSE - TSMIN(CLOSE, 100))) : (-1 * DELTA(CLOSE, 3))) def tech098(self): cond1 = ((delta((ts_sum(self.close, 100)/100), 100) / delta(self.close, 100)) <= 0.05) tech098 = (-1*delta(self.close, 3)) tech098[cond1] = (-1 * (self.close - ts_min(self.close, 100))) save_hdf(tech098, 'tech098', self.length) return # (-1 * RANK(COVIANCE(RANK(CLOSE), RANK(VOLUME), 5))) def tech099(self): tech099 = (-1 * rank(covariance(rank(self.close), rank(self.volume), 5))) save_hdf(tech099, 'tech099', self.length) return # STD(VOLUME,20) def tech100(self): tech100 = stddev(self.volume, 20) save_hdf(tech100, 'tech100', self.length) return # ((RANK(CORR(CLOSE,SUM(MEAN(VOLUME,30),37),15))<RANK(CORR(RANK(((HIGH*0.1)+(VWAP*0.9))),RANK(VOLUME),11)))*-1) def tech101(self): tech101 = ((rank(correlation(self.close, ts_sum(mean(self.volume, 30), 37), 15)) < rank( correlation(rank(((self.high*0.1)+(self.vwap*0.9))), rank(self.volume), 11)))*-1) save_hdf(tech101, 'tech101', self.length) return # SMA(MAX(VOLUME-DELAY(VOLUME,1),0),6,1)/SMA(ABS(VOLUME-DELAY(VOLUME,1)),6,1)*100 def tech102(self): tech102 = sma(np.maximum(self.volume-delay(self.volume, 1), 0), 6, 1)/sma(abs(self.volume-delay(self.volume, 1)), 6, 1)*100 save_hdf(tech102, 'tech102', self.length) return # ((20-LOWDAY(LOW,20))/20)*100 def tech103(self): tech103 = ((20-lowday(self.low, 20))/20)*100 save_hdf(tech103, 'tech103', self.length) return # (-1*(DELTA(CORR(HIGH,VOLUME,5),5)*RANK(STD(CLOSE,20)))) def tech104(self): tech104 = (-1*(delta(correlation(self.high, self.volume, 5), 5) * rank(stddev(self.close, 20)))) save_hdf(tech104, 'tech104', self.length) return # (-1*CORR(RANK(OPEN),RANK(VOLUME),10)) def tech105(self): tech105 = (-1*correlation(rank(self.open), rank(self.volume), 10)) save_hdf(tech105, 'tech105', self.length) return # CLOSE-DELAY(CLOSE,20) def tech106(self): tech106 = self.close-delay(self.close, 20) save_hdf(tech106, 'tech106', self.length) return # (((-1*RANK((OPEN-DELAY(HIGH,1))))*RANK((OPEN-DELAY(CLOSE,1))))*RANK((OPEN-DELAY(LOW,1)))) def tech107(self): tech107 = (((-1*rank((self.open-delay(self.high, 1))))*rank((self.open - delay(self.close, 1))))*rank((self.open-delay(self.low, 1)))) save_hdf(tech107, 'tech107', self.length) return # ((RANK((HIGH-MIN(HIGH,2)))^RANK(CORR((VWAP),(MEAN(VOLUME,120)),6)))*-1) def tech108(self): tech108 = (pow(rank((self.high-ts_min(self.high, 2))), rank(correlation((self.vwap), (mean(self.volume, 120)), 6)))*-1) save_hdf(tech108, 'tech108', self.length) return # SMA(HIGH-LOW,10,2)/SMA(SMA(HIGH-LOW,10,2),10,2)# def tech109(self): tech109 = sma(self.high-self.low, 10, 2) / \ sma(sma(self.high-self.low, 10, 2), 10, 2) save_hdf(tech109, 'tech109', self.length) return # SUM(MAX(0,HIGH-DELAY(CLOSE,1)),20)/SUM(MAX(0,DELAY(CLOSE,1)-LOW),20)*100 def tech110(self): tech110 = ts_sum(np.maximum(0, self.high-delay(self.close, 1)), 20) / \ ts_sum(np.maximum(0, delay(self.close, 1)-self.low), 20)*100 save_hdf(tech110, 'tech110', self.length) return # SMA(VOL*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-LOW),11,2)-SMA(VOL*((CLOSE-LOW)-(HIGH-CLOSE))/(HIGH-L OW),4,2) def tech111(self): tech111 = sma(self.volume*((self.close-self.low)-(self.high-self.close))/(self.high-self.low), 11, 2) - \ sma(self.volume*((self.close-self.low) - (self.high-self.close))/(self.high-self.low), 4, 2) save_hdf(tech111, 'tech111', self.length) return # 
(SUM((CLOSE-DELAY(CLOSE,1)>0?CLOSE-DELAY(CLOSE,1):0),12)-SUM((CLOSE-DELAY(CLOSE,1)<0?ABS(CLOSE-DELAY(CLOSE,1)):0),12))/(SUM((CLOSE-DELAY(CLOSE,1)>0?CLOSE-DELAY(CLOSE,1):0),12)+SUM((CLOSE-DELAY(CLOSE,1)<0?ABS(CLOSE-DELAY(CLOSE,1)):0),12))*100 def tech112(self): cond1 = ((self.close - delay(self.close, 1)) > 0) cond2 = ((self.close - delay(self.close, 1)) < 0) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = (self.close - delay(self.close, 1)) part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = abs(self.close - delay(self.close, 1)) tech112 = (ts_sum(part1, 12)-ts_sum(part2, 12)) / \ (ts_sum(part1, 12)+ts_sum(part2, 12)) * 100 save_hdf(tech112, 'tech112', self.length) return # (-1 * ((RANK((SUM(DELAY(CLOSE, 5), 20) / 20)) * CORR(CLOSE, VOLUME, 2)) * RANK(CORR(SUM(CLOSE, 5), SUM(CLOSE, 20), 2)))) def tech113(self): tech113 = (-1 * ((rank((ts_sum(delay(self.close, 5), 20) / 20)) * correlation(self.close, self.volume, 2)) * rank(correlation(ts_sum(self.close, 5), ts_sum(self.close, 20), 2)))) save_hdf(tech113, 'tech113', self.length) return # ((RANK(DELAY(((HIGH - LOW) / (SUM(CLOSE, 5) / 5)), 2)) * RANK(RANK(VOLUME))) / (((HIGH - LOW) / (SUM(CLOSE, 5) / 5)) / (VWAP - CLOSE))) def tech114(self): tech114 = ((rank(delay(((self.high - self.low) / (ts_sum(self.close, 5) / 5)), 2)) * rank(rank(self.volume)) ) / (((self.high - self.low) / (ts_sum(self.close, 5) / 5)) / (self.vwap - self.close))) save_hdf(tech114, 'tech114', self.length) return # RANK(CORR(((HIGH*0.9)+(CLOSE*0.1)),MEAN(VOLUME,30),10))^RANK(CORR(TSRANK(((HIGH+LOW)/2),4),TSRANK(VOLUME,10),7)) def tech115(self): tech115 = pow(rank(correlation(((self.high*0.9)+(self.close*0.1)), mean(self.volume, 30), 10)), rank(correlation(ts_rank(((self.high+self.low)/2), 4), ts_rank(self.volume, 10), 7))) save_hdf(tech115, 'tech115', self.length) return # REGBETA(CLOSE,SEQUENCE,20) # def tech116(self): tech116 = regbeta(self.close, sequence(20), 20) save_hdf(tech116, 'tech116', self.length) return # ((TSRANK(VOLUME, 32) * (1 - TSRANK(((CLOSE + HIGH) - LOW), 16))) * (1 - TSRANK(RET, 32))) def tech117(self): tech117 = ((ts_rank(self.volume, 32) * (1 - ts_rank(((self.close + self.high) - self.low), 16))) * (1 - ts_rank(self.returns, 32))) save_hdf(tech117, 'tech117', self.length) return # SUM(HIGH-OPEN,20)/SUM(OPEN-LOW,20)*100 def tech118(self): tech118 = ts_sum(self.high-self.open, 20) / \ ts_sum(self.open-self.low, 20)*100 save_hdf(tech118, 'tech118', self.length) return # (RANK(DECAYLINEAR(CORR(VWAP, SUM(MEAN(VOLUME,5), 26), 5), 7)) -RANK(DECAYLINEAR(TSRANK(MIN(CORR(RANK(OPEN), RANK(MEAN(VOLUME,15)), 21), 9), 7), 8))) def tech119(self): tech119 = (rank(decay_linear(correlation(self.vwap, ts_sum(mean(self.volume, 5), 26), 5), 7)) - rank( decay_linear(ts_rank(ts_min(correlation(rank(self.open), rank(mean(self.volume, 15)), 21), 9), 7), 8))) save_hdf(tech119, 'tech119', self.length) return # (RANK((VWAP - CLOSE)) / RANK((VWAP + CLOSE))) def tech120(self): tech120 = (rank((self.vwap - self.close)) / rank((self.vwap + self.close))) save_hdf(tech120, 'tech120', self.length) return # ((RANK((VWAP - MIN(VWAP, 12)))^TSRANK(CORR(TSRANK(VWAP, 20), TSRANK(MEAN(VOLUME,60), 2), 18), 3)) * -1) def tech121(self): tech121 = (pow(rank((self.vwap - ts_min(self.vwap, 12))), ts_rank(correlation( ts_rank(self.vwap, 20), ts_rank(mean(self.volume, 60), 2), 18), 3)) * -1) save_hdf(tech121, 'tech121', self.length) return # 
(SMA(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2)-DELAY(SMA(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2),1))/DELAY(SM A(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2),1) def tech122(self): tech122 = (sma(sma(sma(log(self.close), 13, 2), 13, 2), 13, 2)-delay(sma(sma(sma(log(self.close), 13, 2), 13, 2), 13, 2), 1))/delay(sma(sma(sma(log(self.close), 13, 2), 13, 2), 13, 2), 1) save_hdf(tech122, 'tech122', self.length) return # ((RANK(CORR(SUM(((HIGH + LOW) / 2), 20), SUM(MEAN(VOLUME,60), 20), 9)) < RANK(CORR(LOW, VOLUME, 6))) * -1) def tech123(self): tech123 = ((rank(correlation(ts_sum(((self.high + self.low) / 2), 20), ts_sum( mean(self.volume, 60), 20), 9)) < rank(correlation(self.low, self.volume, 6))) * -1) save_hdf(tech123, 'tech123', self.length) return # (CLOSE - VWAP) / DECAYLINEAR(RANK(TSMAX(CLOSE, 30)),2) def tech124(self): tech124 = (self.close - self.vwap) / \ decay_linear(rank(ts_max(self.close, 30)), 2) save_hdf(tech124, 'tech124', self.length) return # (RANK(DECAYLINEAR(CORR((VWAP), MEAN(VOLUME,80),17), 20)) / RANK(DECAYLINEAR(DELTA(((CLOSE * 0.5) + (VWAP * 0.5)), 3), 16))) def tech125(self): tech125 = (rank(decay_linear(correlation((self.vwap), mean(self.volume, 80), 17), 20) ) / rank(decay_linear(delta(((self.close * 0.5) + (self.vwap * 0.5)), 3), 16))) save_hdf(tech125, 'tech125', self.length) return # (CLOSE + HIGH + LOW) / 3 def tech126(self): tech126 = (self.close+self.high+self.low)/3 save_hdf(tech126, 'tech126', self.length) return # (MEAN((100*(CLOSE-MAX(CLOSE,12))/(MAX(CLOSE,12)))^2))^(1/2) def tech127(self): tech127 = pow(mean(pow( 100*(self.close-ts_max(self.close, 12))/(ts_max(self.close, 12)), 2), 12), 0.5) save_hdf(tech127, 'tech127', self.length) return # 100-(100/(1+SUM(((HIGH+LOW+CLOSE)/3>DELAY((HIGH+LOW+CLOSE)/3,1)?(HIGH+LOW+CLOSE)/3*VOLUME:0),14)/SUM(((HIGH+LOW+CLOSE)/3<DELAY((HIGH+LOW+CLOSE)/3,1)?(HIGH+LOW+CLOSE)/3*VOLUME:0), 14))) def tech128(self): cond1 = (self.high+self.low+self.close) / \ 3 > delay((self.high+self.low+self.close)/3, 1) cond2 = (self.high + self.low + self.close) / \ 3 < delay((self.high + self.low + self.close) / 3, 1) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = (self.high+self.low+self.close)/3*self.volume part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = (self.high+self.low+self.close)/3*self.volume tech128 = 100-(100/(1+ts_sum(part1, 14)/ts_sum(part2, 14))) save_hdf(tech128, 'tech128', self.length) return # SUM((CLOSE-DELAY(CLOSE,1)<0?ABS(CLOSE-DELAY(CLOSE,1)):0),12) def tech129(self): cond1 = ((self.close-delay(self.close, 1)) < 0) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = abs(self.close-delay(self.close, 1)) tech129 = ts_sum(part1, 12) save_hdf(tech129, 'tech129', self.length) return # (RANK(DELCAYLINEAR(CORR(((HIGH + LOW) / 2), MEAN(VOLUME, 40), 9), 10)) / RANK(DELCAYLINEAR(CORR(RANK(VWAP), RANK(VOLUME), 7), 3))) def tech130(self): tech130 = (rank(decay_linear(correlation(((self.high + self.low) / 2), mean(self.volume, 40), 9), 10)) / rank(decay_linear(correlation(rank(self.vwap), rank(self.volume), 7), 3))) save_hdf(tech130, 'tech130', self.length) return # (RANK(DELAT(VWAP, 1))^TSRANK(CORR(CLOSE,MEAN(VOLUME,50), 18), 18)) def tech131(self): tech131 = pow(rank(delta(self.vwap, 1)), ts_rank( correlation(self.close, mean(self.volume, 50), 18), 18)) save_hdf(tech131, 'tech131', self.length) return # MEAN(AMOUNT, 20) def tech132(self): tech132 = 
mean(self.amount, 20) save_hdf(tech132, 'tech132', self.length) return # ((20-HIGHDAY(HIGH,20))/20)*100-((20-LOWDAY(LOW,20))/20)*100 def tech133(self): tech133 = ((20-highday(self.high, 20))/20) * \ 100-((20-lowday(self.low, 20))/20)*100 save_hdf(tech133, 'tech133', self.length) return # (CLOSE-DELAY(CLOSE,12))/DELAY(CLOSE,12)*VOLUME def tech134(self): tech134 = (self.close-delay(self.close, 12)) / \ delay(self.close, 12)*self.volume save_hdf(tech134, 'tech134', self.length) return # SMA(DELAY(CLOSE/DELAY(CLOSE,20),1),20,1) def tech135(self): tech135 = sma(delay(self.close/delay(self.close, 20), 1), 20, 1) save_hdf(tech135, 'tech135', self.length) return # ((-1 * RANK(DELTA(RET, 3))) * CORR(OPEN, VOLUME, 10)) def tech136(self): tech136 = ((-1 * rank(delta(self.returns, 3))) * correlation(self.open, self.volume, 10)) save_hdf(tech136, 'tech136', self.length) return # 16*(CLOSE-DELAY(CLOSE,1)+(CLOSE-OPEN)/2+DELAY(CLOSE,1)-DELAY(OPEN,1))/((ABS(HIGH-DELAY(CLOSE, 1))> # ABS(LOW-DELAY(CLOSE,1)) & ABS(HIGH-DELAY(CLOSE,1))>ABS(HIGH-DELAY(LOW,1))? ABS(HIGH-DELAY(CLOSE,1))+ # ABS(LOW-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:(ABS(LOW-DELAY(CLOSE,1))> # ABS(HIGH-DELAY(LOW,1)) & ABS(LOW-DELAY(CLOSE,1))>ABS(HIGH-DELAY(CLOSE,1))?ABS(LOW-DELAY(CLOSE,1))+ # ABS(HIGH-DELAY(CLOSE,1))/2+ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4:ABS(HIGH-DELAY(LOW,1))+ # ABS(DELAY(CLOSE,1)-DELAY(OPEN,1))/4)))*MAX(ABS(HIGH-DELAY(CLOSE,1)),ABS(LOW-DELAY(CLOSE,1))) def tech137(self): cond1 = (abs(self.high-delay(self.close, 1)) > abs(self.low-delay(self.close, 1)) ) & (abs(self.high-delay(self.close, 1)) > abs(self.high-delay(self.low, 1))) cond2 = (abs(self.low-delay(self.close, 1)) > abs(self.high-delay(self.low, 1)) ) & (abs(self.low-delay(self.close, 1)) > abs(self.high-delay(self.close, 1))) part1 = abs(self.high-delay(self.low, 1)) + \ abs(delay(self.close, 1)-delay(self.open, 1))/4 part1[cond1] = abs(self.high-delay(self.close, 1))+abs(self.low - delay(self.close, 1))/2+abs(delay(self.close, 1)-delay(self.open, 1))/4 part1[cond2] = abs(self.low-delay(self.close, 1))+abs(self.high - delay(self.close, 1))/2+abs(delay(self.close, 1)-delay(self.open, 1))/4 tech137 = 16*(self.close-delay(self.close, 1)+(self.close-self.open)/2+delay(self.close, 1)-delay( self.open, 1))/part1 * np.maximum(abs(self.high-delay(self.close, 1)), abs(self.low-delay(self.close, 1))) save_hdf(tech137, 'tech137', self.length) return # ((RANK(DECAYLINEAR(DELTA((((LOW * 0.7) + (VWAP * 0.3))), 3), 20)) - TSRANK(DECAYLINEAR(TSRANK(CORR(TSRANK(LOW, 8), TSRANK(MEAN(VOLUME, 60), 17), 5), 19), 16), 7)) * -1) def tech138(self): tech138 = ((rank(decay_linear(delta((((self.low * 0.7) + (self.vwap * 0.3))), 3), 20)) - ts_rank(decay_linear( ts_rank(correlation(ts_rank(self.low, 8), ts_rank(mean(self.volume, 60), 17), 5), 19), 16), 7)) * -1) save_hdf(tech138, 'tech138', self.length) return # (-1 * CORR(OPEN, VOLUME, 10)) def tech139(self): tech139 = (-1 * correlation(self.open, self.volume, 10)) save_hdf(tech139, 'tech139', self.length) return # MIN(RANK(DECAYLINEAR(((RANK(OPEN) + RANK(LOW)) - (RANK(HIGH) + RANK(CLOSE))), 8)), TSRANK(DECAYLINEAR(CORR(TSRANK(CLOSE, 8), TSRANK(MEAN(VOLUME, 60), 20), 8), 7), 3)) def tech140(self): tech140 = np.minimum(rank(decay_linear(((rank(self.open) + rank(self.low)) - (rank(self.high) + rank(self.close))), 8)), ts_rank(decay_linear(correlation(ts_rank(self.close, 8), ts_rank(mean(self.volume, 60), 20), 8), 7), 3)) save_hdf(tech140, 'tech140', self.length) return # (RANK(CORR(RANK(HIGH), RANK(MEAN(VOLUME, 15)), 9))* -1) def 
tech141(self): tech141 = ( rank(correlation(rank(self.high), rank(mean(self.volume, 15)), 9)) * -1) save_hdf(tech141, 'tech141', self.length) return # (((-1 * RANK(TSRANK(CLOSE, 10))) * RANK(DELTA(DELTA(CLOSE, 1), 1))) * RANK(TSRANK((VOLUME/MEAN(VOLUME, 20)), 5))) def tech142(self): tech142 = (((-1 * rank(ts_rank(self.close, 10))) * rank(delta(delta(self.close, 1), 1))) * rank(ts_rank((self.volume/mean(self.volume, 20)), 5))) save_hdf(tech142, 'tech142', self.length) return # # CLOSE > DELAY(CLOSE, 1)?(CLOSE - DELAY(CLOSE, 1)) / DELAY(CLOSE, 1) * SELF : SELF # def tech143(self): # # return 0 # SUMIF(ABS(CLOSE/DELAY(CLOSE, 1) - 1)/AMOUNT, 20, CLOSE < DELAY(CLOSE, 1))/COUNT(CLOSE < DELAY(CLOSE, 1), 20) def tech144(self): cond1 = self.close >= delay(self.close, 1) part1 = abs(self.close/delay(self.close, 1) - 1)/self.amount part1[cond1] = 0 cond2 = self.close < delay(self.close, 1) part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = 1 tech144 = ts_sum(part1, 20)/count(part2, 20) save_hdf(tech144, 'tech144', self.length) return # (MEAN(VOLUME, 9) - MEAN(VOLUME, 26)) / MEAN(VOLUME, 12) * 100 def tech145(self): tech145 = (mean(self.volume, 9) - mean(self.volume, 26)) / \ mean(self.volume, 12) * 100 save_hdf(tech145, 'tech145', self.length) return # MEAN((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-SMA((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1),61,2),20)*(( CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-SMA((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1),61,2))/SMA(((CLOS E-DELAY(CLOSE,1))/DELAY(CLOSE,1)-((CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)-SMA((CLOSE-DELAY(CLOSE, 1))/DELAY(CLOSE,1),61,2)))^2,60) def tech146(self): tech146 = (mean((self.close-delay(self.close, 1))/delay(self.close, 1)-sma((self.close-delay(self.close, 1))/delay(self.close, 1), 61, 2), 20) * ((self.close-delay(self.close, 1))/delay(self.close, 1)-sma((self.close-delay(self.close, 1))/delay(self.close, 1), 61, 2)) / sma(pow(((self.close-delay(self.close, 1))/delay(self.close, 1)-((self.close-delay(self.close, 1))/delay(self.close, 1)-sma((self.close-delay(self.close, 1))/delay(self.close, 1), 61, 2))), 2), 61, 2)) save_hdf(tech146, 'tech146', self.length) return # REGBETA(MEAN(CLOSE, 12), SEQUENCE(12)) def tech147(self): tech147 = regbeta(mean(self.close, 12), sequence(12), 12) save_hdf(tech147, 'tech147', self.length) return # ((RANK(CORR((OPEN), SUM(MEAN(VOLUME, 60), 9), 6)) < RANK((OPEN - TSMIN(OPEN, 14)))) * -1) def tech148(self): tech148 = ((rank(correlation((self.open), ts_sum(mean(self.volume, 60), 9), 6)) < rank( (self.open - ts_min(self.open, 14)))) * -1) save_hdf(tech148, 'tech148', self.length) return # #REGBETA(FILTER(CLOSE/DELAY(CLOSE,1)-1,BANCHMARKINDEXCLOSE<DELAY(BANCHMARKINDEXCLOSE,1) ),FILTER(BANCHMARKINDEXCLOSE/DELAY(BANCHMARKINDEXCLOSE,1)-1,BANCHMARKINDEXCLOSE<DELA Y(BANCHMARKINDEXCLOSE,1)),252) # def tech149(self): # cond1 = self.benchmark_close>=delay(self.benchmark_close,1) # part1 = (self.close/delay(self.close,1)-1).fillna(0).replace([np.inf,-np.inf],[0,0]) # part1[cond1] = np.nan # part2 = (self.benchmark_close/delay(self.benchmark_close,1)-1).fillna(0).replace([np.inf,-np.inf],[0,0]) # part2[cond1] = np.nan # rows = self.returns.shape[0] # columns = self.returns.columns # tech149 = pd.DataFrame(np.zeros(self.returns.shape) * np.nan,index=self.close.index, columns=self.close.columns) # for i, col in enumerate(columns): # print(i) # for j in range(2500, rows): # print(j) # model = OLS(x=np.array(part2[(j - 500):j].dropna()), # y=np.array(part1[col][(j - 500):j].dropna())) # 
tech149.iloc[j, i] = model.beta # save_hdf(tech149, 'tech149') # return # (CLOSE + HIGH + LOW) / 3 * VOLUME def tech150(self): tech150 = (self.close+self.high+self.low)/3*self.volume save_hdf(tech150, 'tech150', self.length) return # SMA(CLOSE - DELAY(CLOSE, 20), 20, 1) def tech151(self): tech151 = sma(self.close - delay(self.close, 20), 20, 1) save_hdf(tech151, 'tech151', self.length) return # SMA(MEAN(DELAY(SMA(DELAY(CLOSE/DELAY(CLOSE,9),1),9,1),1),12)-MEAN(DELAY(SMA(DELAY(CLOSE/DELAY(CLOSE,9),1),9,1),1),26),9,1) def tech152(self): tech152 = sma(mean(delay(sma(delay(self.close/delay(self.close, 9), 1), 9, 1), 1), 12) - mean(delay(sma(delay(self.close/delay(self.close, 9), 1), 9, 1), 1), 26), 9, 1) save_hdf(tech152, 'tech152', self.length) return # (MEAN(CLOSE,3)+MEAN(CLOSE,6)+MEAN(CLOSE,12)+MEAN(CLOSE,24))/4 def tech153(self): tech153 = (mean(self.close, 3)+mean(self.close, 6) + mean(self.close, 12)+mean(self.close, 24))/4 save_hdf(tech153, 'tech153', self.length) return # (((VWAP-MIN(VWAP,16)))<(CORR(VWAP,MEAN(VOLUME,180),18))) def tech154(self): tech154 = (((self.vwap-ts_min(self.vwap, 16)) < (correlation(self.vwap, mean(self.vwap, 180), 18)))*1) save_hdf(tech154, 'tech154', self.length) return # SMA(VOLUME,13,2)-SMA(VOLUME,27,2)-SMA(SMA(VOLUME,13,2)-SMA(VOLUME,27,2),10,2) def tech155(self): tech155 = sma(self.volume, 13, 2)-sma(self.volume, 27, 2) - \ sma(sma(self.volume, 13, 2)-sma(self.volume, 27, 2), 10, 2) save_hdf(tech155, 'tech155', self.length) return # (MAX(RANK(DECAYLINEAR(DELTA(VWAP,5),3)),RANK(DECAYLINEAR(((DELTA(((OPEN*0.15)+(LOW*0.85)),2)/((OPEN*0.15)+(LOW*0.85)))*-1),3)))*-1 def tech156(self): tech156 = (np.maximum(rank(decay_linear(delta(self.vwap, 5), 3)), rank(decay_linear( ((delta(((self.open*0.15)+(self.low*0.85)), 2)/((self.open*0.15)+(self.low*0.85)))*-1), 3)))*-1) save_hdf(tech156, 'tech156', self.length) return # (MIN(PROD(RANK(RANK(LOG(SUM(TSMIN(RANK(RANK((-1*RANK(DELTA((CLOSE-1),5))))),2),1)))),1),5)+TSRANK(DELAY((-1*RET),6),5)) def tech157(self): tech157 = (ts_min(product(rank(rank(log(ts_sum(ts_min(rank(rank( (-1*rank(delta((self.close-1), 5))))), 2), 1)))), 1), 5)+ts_rank(delay((-1*self.returns), 6), 5)) save_hdf(tech157, 'tech157', self.length) return # ((HIGH-SMA(CLOSE,15,2))-(LOW-SMA(CLOSE,15,2)))/CLOSE def tech158(self): tech158 = ((self.high-sma(self.close, 15, 2)) - (self.low-sma(self.close, 15, 2)))/self.close save_hdf(tech158, 'tech158', self.length) return # ((CLOSE-SUM(MIN(LOW,DELAY(CLOSE,1)),6))/SUM(MAX(HGIH,DELAY(CLOSE,1))-MIN(LOW,DELAY(CLOSE,1)),6) *12*24+(CLOSE-SUM(MIN(LOW,DELAY(CLOSE,1)),12))/SUM(MAX(HGIH,DELAY(CLOSE,1))-MIN(LOW,DELAY(CL OSE,1)),12)*6*24+(CLOSE-SUM(MIN(LOW,DELAY(CLOSE,1)),24))/SUM(MAX(HGIH,DELAY(CLOSE,1))-MIN(LOW,D ELAY(CLOSE,1)),24)*6*24)*100/(6*12+6*24+12*24) def tech159(self): tech159 = (((self.close-ts_sum(np.minimum(self.low, delay(self.close, 1)), 6)) / ts_sum(np.maximum(self.high, delay(self.close, 1))-np.minimum(self.low, delay(self.close, 1)), 6) * 12*24 + (self.close-ts_sum(np.minimum(self.low, delay(self.close, 1)), 12)) / ts_sum(np.maximum(self.high, delay(self.close, 1))-np.minimum(self.low, delay(self.close, 1)), 12)*6*24 + (self.close-ts_sum(np.minimum(self.low, delay(self.close, 1)), 24)) / ts_sum(np.maximum(self.high, delay(self.close, 1))-np.minimum(self.low, delay(self.close, 1)), 24)*6*24)*100 / (6*12+6*24+12*24)) save_hdf(tech159, 'tech159', self.length) return # SMA((CLOSE<=DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1) def tech160(self): cond1 = self.close <= delay(self.close, 1) part1 = 
pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = stddev(self.close, 20) tech160 = sma(part1, 20, 1) save_hdf(tech160, 'tech160', self.length) return # MEAN(MAX(MAX((HIGH-LOW),ABS(DELAY(CLOSE,1)-HIGH)),ABS(DELAY(CLOSE,1)-LOW)),12) def tech161(self): tech161 = mean(np.maximum(np.maximum((self.high-self.low), abs( delay(self.close, 1)-self.high)), abs(delay(self.close, 1)-self.low)), 12) save_hdf(tech161, 'tech161', self.length) return # (SMA(MAX(CLOSE-DELAY(CLOSE,1),0),12,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),12,1)*100-MIN(SMA(MAX(CLOS E-DELAY(CLOSE,1),0),12,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),12,1)*100,12))/(MAX(SMA(MAX(CLOSE-DELAY(C LOSE,1),0),12,1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),12,1)*100,12)-MIN(SMA(MAX(CLOSE-DELAY(CLOSE,1),0),12, 1)/SMA(ABS(CLOSE-DELAY(CLOSE,1)),12,1)*100,12)) def tech162(self): tech162 = ((sma(np.maximum(self.close-delay(self.close, 1), 0), 12, 1)/sma(delay(self.close-delay(self.close, 1)), 12, 1)*100 - ts_min(sma(np.maximum(self.close-delay(self.close, 1), 0), 12, 1)/sma(delay(self.close-delay(self.close, 1)), 12, 1)*100, 12)) / (ts_max(sma(np.maximum(self.close-delay(self.close, 1), 0), 12, 1)/sma(delay(self.close-delay(self.close, 1)), 12, 1)*100, 12) - ts_min(sma(np.maximum(self.close-delay(self.close, 1), 0), 12, 1)/sma(delay(self.close-delay(self.close, 1)), 12, 1)*100, 12))) save_hdf(tech162, 'tech162', self.length) return # RANK(((((-1 * RET) * MEAN(VOLUME,20)) * VWAP) * (HIGH - CLOSE))) def tech163(self): tech163 = rank(((((-1 * self.returns) * mean(self.volume, 20)) * self.vwap) * (self.high - self.close))) save_hdf(tech163, 'tech163', self.length) return # SMA((((CLOSE>DELAY(CLOSE,1))?1/(CLOSE-DELAY(CLOSE,1)):1)-MIN(((CLOSE>DELAY(CLOSE,1))?1/(CLOSE-D ELAY(CLOSE,1)):1),12))/(HIGH-LOW)*100,13,2) def tech164(self): cond1 = self.close > delay(self.close, 1) part1 = pd.DataFrame(np.ones(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = 1/(self.close-delay(self.close, 1)) tech164 = sma((part1-ts_min(part1, 12)) / (self.high-self.low)*100, 13, 2) save_hdf(tech164, 'tech164', self.length) return # # MAX(SUMAC(CLOSE-MEAN(CLOSE,48)))-MIN(SUMAC(CLOSE-MEAN(CLOSE,48)))/STD(CLOSE,48) # def tech165(self): # # tech165 = np.maximum(np.cumsum(self.close-mean(self.close,48)),0)-np.minimum(np.cumsum(self.close-mean(self.close,48)),0)/stddev(self.close,48) # return 0 # -20*(20-1)^1.5*SUM(CLOSE/DELAY(CLOSE,1)-1-MEAN(CLOSE/DELAY(CLOSE,1)-1,20),20)/((20-1)*(20-2)(SUM((CLOSE/DELAY(CLOSE,1),20)^2,20))^1.5) def tech166(self): tech166 = -20*pow(20-1, 1.5)*ts_sum(self.close/delay(self.close, 1)-1-mean(self.close/delay(self.close, 1)-1, 20), 20)/((20-1)*(20-2)*pow(ts_sum(pow(self.close/delay(self.close, 1), 20, 2), 20), 1.5)) save_hdf(tech166, 'tech166', self.length) return # SUM((CLOSE-DELAY(CLOSE,1)>0?CLOSE-DELAY(CLOSE,1):0),12) def tech167(self): cond1 = (self.close-delay(self.close, 1)) > 0 part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = self.close-delay(self.close, 1) tech167 = ts_sum(part1, 12) save_hdf(tech167, 'tech167', self.length) return # (-1*VOLUME/MEAN(VOLUME,20)) def tech168(self): tech168 = (-1*self.volume/mean(self.volume, 20)) save_hdf(tech168, 'tech168', self.length) return # SMA(MEAN(DELAY(SMA(CLOSE-DELAY(CLOSE,1),9,1),1),12)-MEAN(DELAY(SMA(CLOSE-DELAY(CLOSE,1),9,1),1), 26),10,1) def tech169(self): tech169 = sma(mean(delay(sma(self.close-delay(self.close, 1), 9, 1), 1), 12) - 
mean(delay(sma(self.close-delay(self.close, 1), 9, 1), 1), 26), 10, 1) save_hdf(tech169, 'tech169', self.length) return # ((((RANK((1 / CLOSE)) * VOLUME) / MEAN(VOLUME,20)) * ((HIGH * RANK((HIGH - CLOSE))) / (SUM(HIGH, 5) / 5))) - RANK((VWAP - DELAY(VWAP, 5)))) def tech170(self): tech170 = ((((rank((1 / self.close)) * self.volume) / mean(self.volume, 20)) * ((self.high * rank( (self.high - self.close))) / (ts_sum(self.high, 5) / 5))) - rank((self.vwap - delay(self.vwap, 5)))) save_hdf(tech170, 'tech170', self.length) return # ((-1 * ((LOW - CLOSE) * (OPEN^5))) / ((CLOSE - HIGH) * (CLOSE^5))) def tech171(self): tech171 = ((-1 * ((self.low - self.close) * pow(self.open, 5)) ) / ((self.close - self.high) * pow(self.close, 5))) save_hdf(tech171, 'tech171', self.length) return # MEAN(ABS(SUM((LD>0&LD>HD)?LD:0,14)*100/SUM(TR,14)-SUM((HD>0&HD>LD)?HD:0,14)*100/(SUM((LD>0&LD>HD)?LD:0,14)*100/SUM(TR,14)+SUM(TR,14)+SUM((HD>0&HD>LD)?HD:0,14)*100/SUM(TR,14))*100,6)) def tech172(self): TR = np.maximum(np.maximum(self.high - self.low, abs(self.high - delay(self.close, 1))), abs(self.low - delay(self.close, 1))) HD = (self.high - delay(self.high, 1)) LD = (delay(self.low, 1) - self.low) cond1 = (LD > 0) & (LD > HD) cond2 = (HD > 0) & (HD > LD) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = LD part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = HD tech172 = mean(abs(ts_sum(part1, 14)*100/ts_sum(TR, 14)-ts_sum(part2, 14)*100)/(ts_sum( part1, 14)*100/ts_sum(TR, 14)+ts_sum(TR, 14)+ts_sum(part2, 14)*100/ts_sum(TR, 14))*100, 6) save_hdf(tech172, 'tech172', self.length) return # 3*SMA(CLOSE,13,2)-2*SMA(SMA(CLOSE,13,2),13,2)+SMA(SMA(SMA(LOG(CLOSE),13,2),13,2),13,2); def tech173(self): tech173 = 3*sma(self.close, 13, 2)-2*sma(sma(self.close, 13, 2), 13, 2)+sma(sma(sma(log(self.close), 13, 2), 13, 2), 13, 2) save_hdf(tech173, 'tech173', self.length) return # SMA((CLOSE>DELAY(CLOSE,1)?STD(CLOSE,20):0),20,1) def tech174(self): cond1 = self.close > delay(self.close, 1) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = stddev(self.close, 20) tech174 = sma(part1, 20, 1) save_hdf(tech174, 'tech174', self.length) return # MEAN(MAX(MAX((HIGH-LOW),ABS(DELAY(CLOSE,1)-HIGH)),ABS(DELAY(CLOSE,1)-LOW)),6) def tech175(self): tech175 = mean(np.maximum(np.maximum((self.high-self.low), abs( delay(self.close, 1)-self.high)), abs(delay(self.close, 1)-self.low)), 6) save_hdf(tech175, 'tech175', self.length) return # CORR(RANK(((CLOSE - TSMIN(LOW, 12)) / (TSMAX(HIGH, 12) - TSMIN(LOW,12)))), RANK(VOLUME), 6) def tech176(self): tech176 = correlation(rank(((self.close - ts_min(self.low, 12)) / ( ts_max(self.high, 12) - ts_min(self.low, 12)))), rank(self.volume), 6) save_hdf(tech176, 'tech176', self.length) return # ((20-HIGHDAY(HIGH,20))/20)*100 def tech177(self): tech177 = ((20-highday(self.high, 20))/20)*100 save_hdf(tech177, 'tech177', self.length) return # (CLOSE-DELAY(CLOSE,1))/DELAY(CLOSE,1)*VOLUME def tech178(self): tech178 = (self.close-delay(self.close, 1)) / \ delay(self.close, 1)*self.volume save_hdf(tech178, 'tech178', self.length) return # (RANK(CORR(VWAP, VOLUME, 4)) *RANK(CORR(RANK(LOW), RANK(MEAN(VOLUME,50)), 12))) def tech179(self): tech179 = (rank(correlation(self.vwap, self.volume, 4)) * rank(correlation(rank(self.low), rank(mean(self.volume, 50)), 12))) save_hdf(tech179, 'tech179', self.length) return # ((MEAN(VOLUME,20) < 
VOLUME) ? ((-1 * TSRANK(ABS(DELTA(CLOSE, 7)), 60)) * SIGN(DELTA(CLOSE, 7)) : (-1 * VOLUME))) def tech180(self): cond1 = mean(self.volume, 20) < self.volume tech180 = (-1*self.volume) tech180[cond1] = ( (-1 * ts_rank(abs(delta(self.close, 7)), 60)) * sign(delta(self.close, 7))) save_hdf(tech180, 'tech180', self.length) return # SUM(((CLOSE/DELAY(CLOSE,1)-1)-MEAN((CLOSE/DELAY(CLOSE,1)-1),20))-(BANCHMARKINDEXCLOSE-MEAN(B ANCHMARKINDEXCLOSE,20))^2,20)/SUM((BANCHMARKINDEXCLOSE-MEAN(BANCHMARKINDEXCLOSE,20))^3) def tech181(self): part1 = pd.DataFrame(np.tile(pow(self.benchmark_close-mean(self.benchmark_close, 20), 2), (self.close.shape[1], 1)), index=self.close.columns, columns=self.close.index).T part2 = pd.DataFrame(np.tile(ts_sum(pow(self.benchmark_close-mean(self.benchmark_close, 20), 3), 20), (self.close.shape[1], 1)), index=self.close.columns, columns=self.close.index).T tech181 = ts_sum(((self.close/delay(self.close, 1)-1) - mean((self.close/delay(self.close, 1)-1), 20))-part1, 20)/part2 save_hdf(tech181, 'tech181', self.length) return # #COUNT((CLOSE>OPEN & BANCHMARKINDEXCLOSE>BANCHMARKINDEXOPEN)OR(CLOSE<OPEN & BANCHMARKINDEXCLOSE<BANCHMARKINDEXOPEN),20)/20 def tech182(self): cond1 = (self.close > self.open) & (pd.DataFrame(np.tile((self.benchmark_close > self.benchmark_open), (self.close.shape[1], 1)), index=self.close.columns, columns=self.close.index).T) cond2 = (self.close < self.open) & (pd.DataFrame(np.tile((self.benchmark_close < self.benchmark_open), (self.close.shape[1], 1)), index=self.close.columns, columns=self.close.index).T) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1 | cond2] = 1 tech182 = count(part1, 20)/20 save_hdf(tech182, 'tech182', self.length) return # # MAX(SUMAC(CLOSE-MEAN(CLOSE,24)))-MIN(SUMAC(CLOSE-MEAN(CLOSE,24)))/STD(CLOSE,24) # def tech183(self): # # tech183 = np.maximum(np.cumsum(self.close-mean(self.close,24)),0)-np.minimum(np.cumsum(self.close-mean(self.close,24)),0)/stddev(self.close,24) # # save_hdf(tech183, 'tech183') # return 0 # (RANK(CORR(DELAY((OPEN - CLOSE), 1), CLOSE, 200)) + RANK((OPEN - CLOSE))) def tech184(self): tech184 = (rank(correlation(delay((self.open - self.close), 1), self.close, 200)) + rank((self.open - self.close))) save_hdf(tech184, 'tech184', self.length) return # RANK((-1 * ((1 - (OPEN / CLOSE))^2))) def tech185(self): tech185 = rank((-1 * (pow(1 - (self.open / self.close), 2)))) save_hdf(tech185, 'tech185', self.length) return # (MEAN(ABS(SUM((LD>0 & LD>HD)?LD:0,14)*100/SUM(TR,14)-SUM((HD>0 & HD>LD)?HD:0,14)*100/SUM(TR,14))/ # (SUM((LD>0 & LD>HD)?LD:0,14)*100/SUM(TR,14)+SUM((HD>0 & HD>LD)?HD:0,14)*100/SUM(TR,14))*100,6)+ # DELAY(MEAN(ABS(SUM((LD>0 & LD>HD)?LD:0,14)*100/SUM(TR,14)-SUM((HD>0 & HD>LD)?HD:0,14)*100/SUM(TR,14))/ # (SUM((LD>0 & LD>HD)?LD:0,14)*100/SUM(TR,14)+SUM((HD>0 & HD>LD)?HD:0,14)*100/SUM(TR,14))*100,6),6))/2 def tech186(self): TR = np.maximum(np.maximum(self.high - self.low, abs(self.high - delay(self.close, 1))), abs(self.low - delay(self.close, 1))) HD = (self.high - delay(self.high, 1)) LD = (delay(self.low, 1) - self.low) cond1 = (LD > 0) & (LD > HD) cond2 = (HD > 0) & (HD > LD) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = LD part2 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part2[cond2] = HD tech186 = ((mean(abs(ts_sum(part1, 14)*100/ts_sum(TR, 14)-ts_sum(part2, 14)*100/ts_sum(TR, 14)) / (ts_sum(part1, 14)*100/ts_sum(TR, 
14)+ts_sum(part2, 14)*100/ts_sum(TR, 14))*100, 6) + delay(mean(abs(ts_sum(part1, 14)*100/ts_sum(TR, 14)-ts_sum(part2, 14)*100/ts_sum(TR, 14)) / (ts_sum(part1, 14)*100/ts_sum(TR, 14)+ts_sum(part2, 14)*100/ts_sum(TR, 14))*100, 6), 6))/2) save_hdf(tech186, 'tech186', self.length) return # SUM((OPEN<=DELAY(OPEN,1)?0:MAX((HIGH-OPEN),(OPEN-DELAY(OPEN,1)))),20) def tech187(self): cond1 = self.open <= delay(self.open, 1) tech187 = np.maximum((self.high-self.open), (self.open-delay(self.open, 1))) tech187[cond1] = 0 save_hdf(tech187, 'tech187', self.length) return # ((HIGH-LOW–SMA(HIGH-LOW,11,2))/SMA(HIGH-LOW,11,2))*100 def tech188(self): tech188 = ((self.high-self.low-sma(self.high-self.low, 11, 2) )/sma(self.high-self.low, 11, 2))*100 save_hdf(tech188, 'tech188', self.length) return # MEAN(ABS(CLOSE-MEAN(CLOSE,6)),6) def tech189(self): tech189 = mean(abs(self.close-mean(self.close, 6)), 6) save_hdf(tech189, 'tech189', self.length) return # LOG((COUNT(CLOSE/DELAY(CLOSE,1)-1>((CLOSE/DELAY(CLOSE,19))^(1/20)-1),20)-1)* # (SUMIF(((CLOSE/DELAY(CLOSE)-1-(CLOSE/DELAY(CLOSE,19))^(1/20)-1))^2,20,CLOSE/DELAY(CLOSE)-1< # (CLOSE/DELAY(CLOSE,19))^(1/20)-1))/((COUNT((CLOSE/DELAY(CLOSE)-1<(CLOSE/DELAY(CLOSE,19))^(1/20)-1),20))* # (SUMIF((CLOSE/DELAY(CLOSE)-1-((CLOSE/DELAY(CLOSE,19))^(1/20)-1))^2,20,CLOSE/DELAY(CLOSE)-1>(CLOSE/DELAY(CLOSE,19))^(1/20)-1)))) def tech190(self): cond1 = (self.close/delay(self.close, 1) - 1) > (pow(self.close/delay(self.close, 19), 1/20)-1) part1 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part1[cond1] = 1 cond2 = (self.close/delay(self.close, 1) - 1) < (pow(self.close/delay(self.close, 19), 1/20)-1) part2 = (self.close/delay(self.close, 1)-1) - \ pow((pow(self.close/delay(self.close, 19), 1/20)-1), 2) part2[cond2] = 0 part3 = pd.DataFrame(np.zeros(self.close.shape), index=self.close.index, columns=self.close.columns) part3[cond2] = 1 part4 = (self.close/delay(self.close, 1)-1) - \ pow((pow(self.close/delay(self.close, 19), 1/20)-1), 2) part4[cond1] = 0 tech190 = log((count(part1, 20)-1)*(ts_sum(part2, 20)) / ((count(part3, 20))*(ts_sum(part4, 20)))) save_hdf(tech190, 'tech190', self.length) return # (CORR(MEAN(VOLUME,20), LOW, 5) + ((HIGH + LOW) / 2)) - CLOSE def tech191(self): tech191 = (correlation(mean(self.volume, 20), self.low, 5) + ((self.high + self.low) / 2)) - self.close save_hdf(tech191, 'tech191', self.length) return # -1*CORR(VWAP,VOLUME,6) def techJLBL(self): techJLBL = -1 * correlation(self.vwap, self.volume, 6) save_hdf(techJLBL, 'techJLBL', self.length) return # OPEN/CLOSE def techKPQK(self): techKPQK = self.open/delay(self.close, 1) save_hdf(techKPQK, 'techKPQK', self.length) return # -1*VOLUME/MEAN(VOLUME,20) def techYCCJL(self): techYCCJL = -1*self.volume/mean(self.volume, 6) save_hdf(techYCCJL, 'techYCCJL', self.length) return # -1*CORR(HIGH/LOW,VOLUME,6) def techLFBL(self): techLFBL = -1 * correlation(self.high/self.low, self.volume, 6) save_hdf(techLFBL, 'techLFBL', self.length) return def run_func(paras): startdate = paras["startdate"] enddate = paras["enddate"] count = paras["count"] length = paras["length"] techs = Tech191(startdate, enddate, count, length) func_list = paras["func"] for func_name in func_list: eval("techs." 
+ func_name + "()") return def set_params(func_list, Start_Date, End_Date, count, length): td = {"module_name": "Tech191", "startdate": Start_Date, "enddate": End_Date, "count": count, "length": length} params = [] for i, sec_code in enumerate(func_list): td['func'] = sec_code params.append(td.copy()) return params if __name__ == '__main__': """设置更新日期""" TradeDate =
pd.read_csv(Pre_path + "\\TradeDate.csv")
pandas.read_csv
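# A minimal, self-contained sketch around the pandas.read_csv completion above.
# The file name "TradeDate.csv", its location, and the single "date" column are
# assumptions for illustration; the real layout is not shown in the prompt.
import pandas as pd

trade_date = pd.read_csv("TradeDate.csv", parse_dates=["date"])
# the update script presumably slices a window of trading days from this table
start_date, end_date = trade_date["date"].min(), trade_date["date"].max()
print(start_date, end_date)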
# -*- coding: utf-8 -*- """ Created on Tue Jun 20 22:14:07 2017 @author: YSu """ from pyomo.opt import SolverFactory from ISONE_dispatch import model as m1 from ISONE_dispatchLP import model as m2 from pyomo.core import Var from pyomo.core import Constraint from pyomo.core import Param from operator import itemgetter import pandas as pd import numpy as np from datetime import datetime import pyomo.environ as pyo from pyomo.environ import value Solvername = 'gurobi' Timelimit = 3600 # for the simulation of one day in seconds # Threadlimit = 8 # maximum number of threads to use def sim(days): instance = m1.create_instance('data.dat') instance2 = m2.create_instance('data.dat') instance2.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT) opt = SolverFactory(Solvername) if Solvername == 'cplex': opt.options['timelimit'] = Timelimit elif Solvername == 'gurobi': opt.options['TimeLimit'] = Timelimit # opt.options['threads'] = Threadlimit H = instance.HorizonHours D = 2 K=range(1,H+1) #Space to store results mwh_1=[] mwh_2=[] mwh_3=[] on=[] switch=[] srsv=[] nrsv=[] solar=[] onshore_wind = [] offshore_wind=[] must_run=[] flow=[] Generator=[] Duals=[] System_cost = [] df_generators = pd.read_excel('generators.xlsx',header=0,sheet_name='Generators (dispatch)') #max here can be (1,365) for day in range(1,days): if day == days-1: horizon_end = 49 else: horizon_end = 25 #load time series data for z in instance.zones: instance.GasPrice[z] = instance.SimGasPrice[z,day] instance.OilPrice[z] = instance.SimOilPrice[z,day] for i in K: instance.HorizonDemand[z,i] = instance.SimDemand[z,(day-1)*24+i] instance.HorizonOffshoreWind[z,i] = instance.SimOffshoreWind[z,(day-1)*24+i] instance.HorizonSolar[z,i] = instance.SimSolar[z,(day-1)*24+i] instance.HorizonOnshoreWind[z,i] = instance.SimOnshoreWind[z,(day-1)*24+i] instance.HorizonMustRun[z,i] = instance.SimMustRun[z,(day-1)*24+i] for d in range(1,D+1): instance.HorizonNY_imports_CT[d] = instance.SimNY_imports_CT[day-1+d] instance.HorizonNY_imports_WCMA[d] = instance.SimNY_imports_CT[day-1+d] instance.HorizonNY_imports_VT[d] = instance.SimNY_imports_CT[day-1+d] instance.HorizonHQ_imports_VT[d] = instance.SimNY_imports_CT[day-1+d] instance.HorizonNB_imports_ME[d] = instance.SimNY_imports_CT[day-1+d] instance.HorizonME_hydro[d] = instance.SimME_hydro[day-1+d] instance.HorizonVT_hydro[d] = instance.SimVT_hydro[day-1+d] instance.HorizonRI_hydro[d] = instance.SimRI_hydro[day-1+d] instance.HorizonNH_hydro[d] = instance.SimNH_hydro[day-1+d] instance.HorizonCT_hydro[d] = instance.SimCT_hydro[day-1+d] instance.HorizonWCMA_hydro[d] = instance.SimWCMA_hydro[day-1+d] instance.HorizonNEMA_hydro[d] = instance.SimNEMA_hydro[day-1+d] for i in K: instance.HorizonReserves[i] = instance.SimReserves[(day-1)*24+i] instance.HorizonCT_exports_NY[i] = instance.SimCT_exports_NY[(day-1)*24+i] instance.HorizonWCMA_exports_NY[i] = instance.SimWCMA_exports_NY[(day-1)*24+i] instance.HorizonVT_exports_NY[i] = instance.SimVT_exports_NY[(day-1)*24+i] instance.HorizonVT_exports_HQ[i] = instance.SimVT_exports_HQ[(day-1)*24+i] instance.HorizonME_exports_NB[i] = instance.SimME_exports_NB[(day-1)*24+i] NEISO_result = opt.solve(instance, tee=True, symbolic_solver_labels=True, load_solutions=False) instance.solutions.load_from(NEISO_result) # record objective function value coal = 0 gas1_1 = 0 gas2_1 = 0 gas3_1 = 0 gas1_2 = 0 gas2_2 = 0 gas3_2 = 0 gas1_3 = 0 gas2_3 = 0 gas3_3 = 0 gas1_4 = 0 gas2_4 = 0 gas3_4 = 0 gas1_5 = 0 gas2_5 = 0 gas3_5 = 0 gas1_6 = 0 gas2_6 = 0 gas3_6 = 0 gas1_7 = 0 gas2_7 = 0 gas3_7 = 
0 gas1_8 = 0 gas2_8 = 0 gas3_8 = 0 Oil1_1 = 0 Oil2_1 = 0 Oil3_1 = 0 Oil1_2 = 0 Oil2_2 = 0 Oil3_2 = 0 Oil1_3 = 0 Oil2_3 = 0 Oil3_3 = 0 Oil1_4 = 0 Oil2_4 = 0 Oil3_4 = 0 Oil1_5 = 0 Oil2_5 = 0 Oil3_5 = 0 Oil1_6 = 0 Oil2_6 = 0 Oil3_6 = 0 Oil1_7 = 0 Oil2_7 = 0 Oil3_7 = 0 Oil1_8 = 0 Oil2_8 = 0 Oil3_8 = 0 NY_Imports_CT_all = 0 NY_Imports_WCMA_all = 0 NY_Imports_VT_all = 0 HQ_Imports_VT_all = 0 NB_Imports_ME_all = 0 slack = 0 fix_cost = 0 st = 0 exchn = 0 hydro = 0 onshore_all = 0 offshore_all = 0 solar_all = 0 mustrun_all = 0 for i in range(1,horizon_end): for j in instance.Coal: coal = coal + instance.mwh_1[j,i].value*(instance.seg1[j]*2.2 + instance.var_om[j]) + instance.mwh_2[j,i].value*(instance.seg2[j]*2.2 + instance.var_om[j]) + instance.mwh_3[j,i].value*(instance.seg3[j]*2.2 + instance.var_om[j]) for j in instance.Zone1Gas: gas1_1 = gas1_1 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['CT'].value + instance.var_om[j]) gas2_1 = gas2_1 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['CT'].value + instance.var_om[j]) gas3_1 = gas3_1 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['CT'].value + instance.var_om[j]) for j in instance.Zone2Gas: gas1_2 = gas1_2 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['ME'].value + instance.var_om[j]) gas2_2 = gas2_2 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['ME'].value + instance.var_om[j]) gas3_2 = gas3_2 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['ME'].value + instance.var_om[j]) for j in instance.Zone3Gas: gas1_3 = gas1_3 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['NH'].value + instance.var_om[j]) gas2_3 = gas2_3 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['NH'].value + instance.var_om[j]) gas3_3 = gas3_3 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['NH'].value + instance.var_om[j]) for j in instance.Zone4Gas: gas1_4 = gas1_4 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['NEMA'].value + instance.var_om[j]) gas2_4 = gas2_4 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['NEMA'].value + instance.var_om[j]) gas3_4 = gas3_4 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['NEMA'].value + instance.var_om[j]) for j in instance.Zone5Gas: gas1_5 = gas1_5 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['RI'].value + instance.var_om[j]) gas2_5 = gas2_5 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['RI'].value + instance.var_om[j]) gas3_5 = gas3_5 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['RI'].value + instance.var_om[j]) for j in instance.Zone6Gas: gas1_6 = gas1_6 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['SEMA'].value + instance.var_om[j]) gas2_6 = gas2_6 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['SEMA'].value + instance.var_om[j]) gas3_6 = gas3_6 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['SEMA'].value + instance.var_om[j]) for j in instance.Zone7Gas: gas1_7 = gas1_7 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['VT'].value + instance.var_om[j]) gas2_7 = gas2_7 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['VT'].value + instance.var_om[j]) gas3_7 = gas3_7 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['VT'].value + instance.var_om[j]) for j in instance.Zone8Gas: gas1_8 = gas1_8 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['WCMA'].value + instance.var_om[j]) 
gas2_8 = gas2_8 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['WCMA'].value + instance.var_om[j]) gas3_8 = gas3_8 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['WCMA'].value + instance.var_om[j]) for j in instance.Zone1Oil: Oil1_1 = Oil1_1 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['CT'].value + instance.var_om[j]) Oil2_1 = Oil2_1 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['CT'].value + instance.var_om[j]) Oil3_1 = Oil3_1 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['CT'].value + instance.var_om[j]) for j in instance.Zone2Oil: Oil1_2 = Oil1_2 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['ME'].value + instance.var_om[j]) Oil2_2 = Oil2_2 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['ME'].value + instance.var_om[j]) Oil3_2 = Oil3_2 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['ME'].value + instance.var_om[j]) for j in instance.Zone3Oil: Oil1_3 = Oil1_3 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['NH'].value + instance.var_om[j]) Oil2_3 = Oil2_3 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['NH'].value + instance.var_om[j]) Oil3_3 = Oil3_3 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['NH'].value + instance.var_om[j]) for j in instance.Zone4Oil: Oil1_4 = Oil1_4 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['NEMA'].value + instance.var_om[j]) Oil2_4 = Oil2_4 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['NEMA'].value + instance.var_om[j]) Oil3_4 = Oil3_4 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['NEMA'].value + instance.var_om[j]) for j in instance.Zone5Oil: Oil1_5 = Oil1_5 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['RI'].value + instance.var_om[j]) Oil2_5 = Oil2_5 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['RI'].value + instance.var_om[j]) Oil3_5 = Oil3_5 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['RI'].value + instance.var_om[j]) for j in instance.Zone6Oil: Oil1_6 = Oil1_6 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['SEMA'].value + instance.var_om[j]) Oil2_6 = Oil2_6 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['SEMA'].value + instance.var_om[j]) Oil3_6 = Oil3_6 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['SEMA'].value + instance.var_om[j]) for j in instance.Zone7Oil: Oil1_7 = Oil1_7 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['VT'].value + instance.var_om[j]) Oil2_7 = Oil2_7 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['VT'].value + instance.var_om[j]) Oil3_7 = Oil3_7 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['VT'].value + instance.var_om[j]) for j in instance.Zone8Oil: Oil1_8 = Oil1_8 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.OilPrice['WCMA'].value + instance.var_om[j]) Oil2_8 = Oil2_8 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.OilPrice['WCMA'].value + instance.var_om[j]) Oil3_8 = Oil3_8 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.OilPrice['WCMA'].value + instance.var_om[j]) for j in instance.NY_Imports_CT: NY_Imports_CT_all = NY_Imports_CT_all + instance.mwh_1[j,i].value*4.3 + instance.mwh_2[j,i].value*4.3 + instance.mwh_3[j,i].value*4.3 for j in instance.NY_Imports_WCMA: NY_Imports_WCMA_all = NY_Imports_WCMA_all + instance.mwh_1[j,i].value*4.3 + instance.mwh_2[j,i].value*4.3 + instance.mwh_3[j,i].value*4.3 for 
j in instance.NY_Imports_VT: NY_Imports_VT_all = NY_Imports_VT_all + instance.mwh_1[j,i].value*4.3 + instance.mwh_2[j,i].value*4.3 + instance.mwh_3[j,i].value*4.3 for j in instance.HQ_Imports_VT: HQ_Imports_VT_all = HQ_Imports_VT_all + instance.mwh_1[j,i].value*4.3 + instance.mwh_2[j,i].value*4.3 + instance.mwh_3[j,i].value*4.3 for j in instance.NB_Imports_ME: NB_Imports_ME_all = NB_Imports_ME_all + instance.mwh_1[j,i].value*4.3 + instance.mwh_2[j,i].value*4.3 + instance.mwh_3[j,i].value*4.3 for j in instance.Slack: slack = slack + instance.mwh_1[j,i].value*(instance.seg1[j]*2000 + instance.var_om[j]) + instance.mwh_2[j,i].value*(instance.seg2[j]*2000 + instance.var_om[j]) + instance.mwh_3[j,i].value*(instance.seg3[j]*2000 + instance.var_om[j]) for j in instance.Generators: fix_cost = fix_cost + instance.no_load[j]*instance.on[j,i].value for j in instance.Generators: st = st + instance.st_cost[j]*instance.switch[j,i].value for s in instance.sources: for k in instance.sinks: exchn = exchn + instance.flow[s,k,i].value*instance.hurdle[s,k] for j in instance.Hydro: hydro = hydro + instance.mwh_1[j,i].value*0.01 + instance.mwh_2[j,i].value*0.01 + instance.mwh_3[j,i].value*0.01 for j in instance.zones: onshore_all = onshore_all + instance.onshorewind[j,i].value*0.01 + instance.onshorewind[j,i].value*0.01 + instance.onshorewind[j,i].value*0.01 for j in instance.zones: offshore_all = offshore_all + instance.offshorewind[j,i].value*0.01 + instance.offshorewind[j,i].value*0.01 + instance.offshorewind[j,i].value*0.01 for j in instance.zones: solar_all = solar_all + instance.solar[j,i].value*0.01 + instance.solar[j,i].value*0.01 + instance.solar[j,i].value*0.01 for j in instance.zones: mustrun_all = mustrun_all + instance.mustrun[j,i].value*0.01 + instance.mustrun[j,i].value*0.01 + instance.mustrun[j,i].value*0.01 S = coal + gas1_1 + gas2_1 + gas3_1 + gas1_2 + gas2_2 + gas3_2 + gas1_3 + gas2_3 + gas3_3 + gas1_4 + gas2_4 + gas3_4 + gas1_5 + gas2_5 + gas3_5 + gas1_6 + gas2_6 + gas3_6 + gas1_7 + gas2_7 + gas3_7 + gas1_8 + gas2_8 + gas3_8 + Oil1_1 + Oil2_1 + Oil3_1 + Oil1_2 + Oil2_2 + Oil3_2 + Oil1_3 + Oil2_3 + Oil3_3 + Oil1_4 + Oil2_4 + Oil3_4 + Oil1_5 + Oil2_5 + Oil3_5 + Oil1_6 + Oil2_6 + Oil3_6 + Oil1_7 + Oil2_7 + Oil3_7 + Oil1_8 + Oil2_8 + Oil3_8 + slack + fix_cost + st + exchn + NY_Imports_CT_all + NY_Imports_WCMA_all + NY_Imports_VT_all + HQ_Imports_VT_all + NB_Imports_ME_all + hydro + onshore_all + offshore_all + solar_all + mustrun_all System_cost.append(round(S, 3)) for z in instance2.zones: instance2.GasPrice[z] = instance2.SimGasPrice[z,day] instance2.OilPrice[z] = instance2.SimOilPrice[z,day] for i in K: instance2.HorizonDemand[z,i] = instance2.SimDemand[z,(day-1)*24+i] instance2.HorizonOffshoreWind[z,i] = instance2.SimOffshoreWind[z,(day-1)*24+i] instance2.HorizonSolar[z,i] = instance2.SimSolar[z,(day-1)*24+i] instance2.HorizonOnshoreWind[z,i] = instance2.SimOnshoreWind[z,(day-1)*24+i] instance2.HorizonMustRun[z,i] = instance2.SimMustRun[z,(day-1)*24+i] for d in range(1,D+1): instance2.HorizonNY_imports_CT[d] = instance2.SimNY_imports_CT[day-1+d] instance2.HorizonNY_imports_WCMA[d] = instance2.SimNY_imports_CT[day-1+d] instance2.HorizonNY_imports_VT[d] = instance2.SimNY_imports_CT[day-1+d] instance2.HorizonHQ_imports_VT[d] = instance2.SimNY_imports_CT[day-1+d] instance2.HorizonNB_imports_ME[d] = instance2.SimNY_imports_CT[day-1+d] instance2.HorizonME_hydro[d] = instance2.SimME_hydro[day-1+d] instance2.HorizonVT_hydro[d] = instance2.SimVT_hydro[day-1+d] instance2.HorizonRI_hydro[d] = 
instance2.SimRI_hydro[day-1+d] instance2.HorizonNH_hydro[d] = instance2.SimNH_hydro[day-1+d] instance2.HorizonCT_hydro[d] = instance2.SimCT_hydro[day-1+d] instance2.HorizonWCMA_hydro[d] = instance2.SimWCMA_hydro[day-1+d] instance2.HorizonNEMA_hydro[d] = instance2.SimNEMA_hydro[day-1+d] for i in K: instance2.HorizonReserves[i] = instance2.SimReserves[(day-1)*24+i] instance2.HorizonCT_exports_NY[i] = instance2.SimCT_exports_NY[(day-1)*24+i] instance2.HorizonWCMA_exports_NY[i] = instance2.SimWCMA_exports_NY[(day-1)*24+i] instance2.HorizonVT_exports_NY[i] = instance2.SimVT_exports_NY[(day-1)*24+i] instance2.HorizonVT_exports_HQ[i] = instance2.SimVT_exports_HQ[(day-1)*24+i] instance2.HorizonME_exports_NB[i] = instance2.SimME_exports_NB[(day-1)*24+i] for j in instance.Generators: for t in K: if value(instance.on[j,t]) == 1: instance2.on[j,t] = 1 instance2.on[j,t].fixed = True else: instance.on[j,t] = 0 instance2.on[j,t] = 0 instance2.on[j,t].fixed = True if value(instance.switch[j,t]) == 1: instance2.switch[j,t] = 1 instance2.switch[j,t].fixed = True else: instance2.switch[j,t] = 0 instance2.switch[j,t] = 0 instance2.switch[j,t].fixed = True results = opt.solve(instance2, tee=True, symbolic_solver_labels=True, load_solutions=False) instance2.solutions.load_from(results) print ("Duals") for c in instance2.component_objects(Constraint, active=True): # print (" Constraint",c) cobject = getattr(instance2, str(c)) if str(c) in ['Bal1Constraint','Bal2Constraint','Bal3Constraint','Bal4Constraint', 'Bal5Constraint','Bal6Constraint','Bal7Constraint','Bal8Constraint']: for index in cobject: if index>0 and index<horizon_end: # print (" Constraint",c) try: Duals.append((str(c),index+((day-1)*24), round(instance2.dual[cobject[index]], 3))) except KeyError: Duals.append((str(c),index+((day-1)*24),-999)) # print (" ", index, instance2.dual[cobject[index]]) #The following section is for storing and sorting results for v in instance.component_objects(Var, active=True): varobject = getattr(instance, str(v)) a=str(v) if a=='mwh_1': for index in varobject: name = index[0] g = df_generators[df_generators['name']==name] seg1 = g['seg1'].values seg1 = seg1[0] zone = g['zone'] if int(index[1]>0 and index[1]<horizon_end): gas_price = instance.GasPrice[z].value oil_price = instance.OilPrice[z].value if index[0] in instance.Gas: marginal_cost = seg1*gas_price mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Gas',round(marginal_cost, 3))) elif index[0] in instance.Coal: marginal_cost = seg1*2.2 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Coal',round(marginal_cost, 3))) elif index[0] in instance.Oil: marginal_cost = seg1*oil_price mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Oil',round(marginal_cost, 3))) elif index[0] in instance.Slack: marginal_cost = 700 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Slack',round(marginal_cost, 3))) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Hydro',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_CT: marginal_cost = 4.3 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_WCMA: marginal_cost = 4.3 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 
3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_VT: marginal_cost = 4.3 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.HQ_Imports_VT: marginal_cost = 4.3 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NB_Imports_ME: marginal_cost = 4.3 mwh_1.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) if a=='mwh_2': for index in varobject: name = index[0] g = df_generators[df_generators['name']==name] seg2 = g['seg2'].values seg2 = seg2[0] zone = g['zone'] if int(index[1]>0 and index[1]<horizon_end): gas_price = instance.GasPrice[z].value oil_price = instance.OilPrice[z].value if index[0] in instance.Gas: marginal_cost = seg2*gas_price mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Gas',round(marginal_cost, 3))) elif index[0] in instance.Coal: marginal_cost = seg2*2.2 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Coal',round(marginal_cost, 3))) elif index[0] in instance.Oil: marginal_cost = seg2*oil_price mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Oil',round(marginal_cost, 3))) elif index[0] in instance.Slack: marginal_cost = 700 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Slack',round(marginal_cost, 3))) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Hydro',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_CT: marginal_cost = 4.3 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_WCMA: marginal_cost = 4.3 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_VT: marginal_cost = 4.3 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.HQ_Imports_VT: marginal_cost = 4.3 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NB_Imports_ME: marginal_cost = 4.3 mwh_2.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) if a=='mwh_3': for index in varobject: name = index[0] g = df_generators[df_generators['name']==name] seg3 = g['seg3'].values seg3 = seg3[0] zone = g['zone'] if int(index[1]>0 and index[1]<horizon_end): gas_price = instance.GasPrice[z].value oil_price = instance.OilPrice[z].value if index[0] in instance.Gas: marginal_cost = seg3*gas_price mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Gas',round(marginal_cost, 3))) elif index[0] in instance.Coal: marginal_cost = seg3*2.2 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Coal',round(marginal_cost, 3))) elif index[0] in instance.Oil: marginal_cost = seg3*oil_price 
mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Oil',round(marginal_cost, 3))) elif index[0] in instance.Slack: marginal_cost = 700 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Slack',round(marginal_cost, 3))) elif index[0] in instance.Hydro: marginal_cost = 0 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Hydro',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_CT: marginal_cost = 4.3 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_WCMA: marginal_cost = 4.3 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NY_Imports_VT: marginal_cost = 4.3 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.HQ_Imports_VT: marginal_cost = 4.3 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) elif index[0] in instance.NB_Imports_ME: marginal_cost = 4.3 mwh_3.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0],'Imports',round(marginal_cost, 3))) # if a=='on': # for index in varobject: # name = index[0] # g = df_generators[df_generators['name']==name] # zone = g['zone'] # if int(index[1]>0 and index[1]<horizon_end): # on.append((index[0],index[1]+((day-1)*24),varobject[index].value,zone.values[0])) # if a=='switch': # for index in varobject: # name = index[0] # g = df_generators[df_generators['name']==name] # zone = g['zone'] # if int(index[1]>0 and index[1]<horizon_end): # switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,zone.values[0])) # if a=='srsv': # for index in varobject: # name = index[0] # g = df_generators[df_generators['name']==name] # zone = g['zone'] # if int(index[1]>0 and index[1]<horizon_end): # srsv.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0])) # if a=='nrsv': # for index in varobject: # name = index[0] # g = df_generators[df_generators['name']==name] # zone = g['zone'] # if int(index[1]>0 and index[1]<horizon_end): # nrsv.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3),zone.values[0])) if a=='offshorewind': for index in varobject: if int(index[1]>0 and index[1]<horizon_end): offshore_wind.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3))) if a=='solar': for index in varobject: if int(index[1]>0 and index[1]<horizon_end): solar.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3))) if a=='onshorewind': for index in varobject: if int(index[1]>0 and index[1]<horizon_end): onshore_wind.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3))) if a=='mustrun': for index in varobject: if int(index[1]>0 and index[1]<horizon_end): must_run.append((index[0],index[1]+((day-1)*24),round(varobject[index].value, 3))) if a=='flow': for index in varobject: if int(index[2]>0 and index[2]<horizon_end): flow.append((index[0],index[1],index[2]+((day-1)*24),round(varobject[index].value, 3))) for j in instance.Generators: if value(instance.on[j,24]) == 1: instance.on[j,0] = 1 else: instance.on[j,0] = 0 instance.on[j,0].fixed = True if instance.mwh_1[j,24].value <=0 and 
instance.mwh_1[j,24].value>= -0.0001: newval_1=0 else: newval_1=instance.mwh_1[j,24].value instance.mwh_1[j,0] = newval_1 instance.mwh_1[j,0].fixed = True if instance.mwh_2[j,24].value <=0 and instance.mwh_2[j,24].value>= -0.0001: newval=0 else: newval=instance.mwh_2[j,24].value if instance.mwh_3[j,24].value <=0 and instance.mwh_3[j,24].value>= -0.0001: newval2=0 else: newval2=instance.mwh_3[j,24].value instance.mwh_2[j,0] = newval instance.mwh_2[j,0].fixed = True instance.mwh_3[j,0] = newval2 instance.mwh_3[j,0].fixed = True if value(instance.switch[j,24]) == 1: instance.switch[j,0] = 1 else: instance.switch[j,0] = 0 instance.switch[j,0].fixed = True if instance.srsv[j,24].value <=0 and instance.srsv[j,24].value>= -0.0001: newval_srsv=0 else: newval_srsv=instance.srsv[j,24].value instance.srsv[j,0] = newval_srsv instance.srsv[j,0].fixed = True if instance.nrsv[j,24].value <=0 and instance.nrsv[j,24].value>= -0.0001: newval_nrsv=0 else: newval_nrsv=instance.nrsv[j,24].value instance.nrsv[j,0] = newval_nrsv instance.nrsv[j,0].fixed = True print(day) mwh_1_pd=pd.DataFrame(mwh_1,columns=['Generator','Time','Value','Zones','Type','$/MWh']) mwh_2_pd=pd.DataFrame(mwh_2,columns=['Generator','Time','Value','Zones','Type','$/MWh']) mwh_3_pd=pd.DataFrame(mwh_3,columns=['Generator','Time','Value','Zones','Type','$/MWh']) # on_pd=pd.DataFrame(on,columns=['Generator','Time','Value','Zones']) # switch_pd=pd.DataFrame(switch,columns=['Generator','Time','Value','Zones']) # srsv_pd=pd.DataFrame(srsv,columns=['Generator','Time','Value','Zones']) # nrsv_pd=pd.DataFrame(nrsv,columns=['Generator','Time','Value','Zones']) solar_pd=pd.DataFrame(solar,columns=['Zone','Time','Value']) onshore_wind_pd=pd.DataFrame(onshore_wind,columns=['Zone','Time','Value']) offshore_wind_pd=pd.DataFrame(offshore_wind,columns=['Zone','Time','Value']) must_run_pd=pd.DataFrame(must_run,columns=['Zone','Time','Value']) flow_pd=pd.DataFrame(flow,columns=['Source','Sink','Time','Value']) shadow_price=
pd.DataFrame(Duals,columns=['Constraint','Time','Value'])
pandas.DataFrame
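# Sketch of the pandas.DataFrame construction completed above: the dispatch loop
# collects Duals as (constraint name, hour, shadow price) tuples, with -999
# standing in for a missing dual, and a DataFrame with named columns is built
# from that list. The numeric values below are invented for illustration.
import pandas as pd

duals = [
    ("Bal1Constraint", 1, 23.5),
    ("Bal1Constraint", 2, 24.1),
    ("Bal2Constraint", 1, -999),  # KeyError fallback used in the script
]
shadow_price = pd.DataFrame(duals, columns=["Constraint", "Time", "Value"])
shadow_price.to_csv("shadow_price.csv", index=False)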
#!/usr/bin/env python # coding: utf-8 import umap import mne, numpy as np import seaborn as sns import glob import pandas as pd import os, os.path as op from sklearn.preprocessing import StandardScaler from scipy.stats import kurtosis from scipy.signal import welch import pylab def get_subjid(filename): return os.path.basename(filename).split('_')[0] def get_raw_subj_data(subjid, topdir='/fast/ICA/*/'): glob_cmd = os.path.join(topdir, subjid+'*_300srate.fif') return glob.glob(glob_cmd)[0] def get_distribution(filename): #Datasets are in the following formate /topdir/Distribution/Dataset return filename.split('/')[-2] def assign_repo_EEG_labels(dframe): ''' Add repo specific EEG names to data Parameters ---------- dframe : pd.DataFrame DESCRIPTION. Returns ------- dframe : pd.DataFrame DataFrame with EOG and ECG labels marked as veog, heog, ekg. ''' dframe['veog'] = None dframe['heog'] = None dframe['ekg'] = None dframe.loc[dframe.distribution=='CAMCAN', ['veog','heog','ekg']] = \ 'EOG061','EOG062','ECG063' dframe.loc[dframe.distribution=='MOUS', ['veog','heog','ekg']] = \ 'EEG058','EEG057','EEG059' dframe.loc[dframe.distribution=='HCP', ['veog','heog','ekg']] = \ 'VEOG','HEOG','ECG' dframe.loc[dframe.distribution=='NIH_HV', ['veog','heog','ekg']] = \ None, None, None return dframe def populate_dframe(topdir='/fast/ICA/', load_ica=False): dsets=glob.glob(op.join(topdir, '*/*0-ica.fif')) dframe=pd.DataFrame(dsets, columns=['ica_filename']) dframe['distribution']=dframe['ica_filename'].apply(get_distribution) dframe['subjid']=dframe['ica_filename'].apply(get_subjid) dframe['raw_fname'] = dframe['subjid'].apply(get_raw_subj_data) dframe.distribution.value_counts() dframe = assign_repo_EEG_labels(dframe) if load_ica == False: return dframe # else: # return dframe, def get_consistent_ch_names(current_dframe): '''Hack to get all the same topomap dimensions''' ch_names=set() for index,row in current_dframe.iterrows(): raw = mne.io.read_raw_fif(row['raw_fname']) ch_names=set(raw.ch_names).union(ch_names) if current_dframe.iloc[0]['distribution']=='MOUS': ch_names = [i for i in ch_names if i[0]=='M'] # elif current_dframe.iloc[0]['distribution']=='CAMCAN': # ch_names = [i for i in ch_names if i[0]=='M'] # tmp=ica.get_sources(raw, start=0, stop=100*raw.info['sfreq']) # freqs, _ =welch(tmp._data, fs=raw.info['sfreq']) return ch_names def calc_hcp_bipolar(row): '''Load info from the mne-hcp and return bipolar calculated ECG, VEOG, HEOG''' subjid = row.subjid #info read from mne-hcp not the same as the one tied to the raw dataset info = mne.io.read_info(f'/fast/ICA/HCPinfo/{subjid}-info.fif') raw=mne.io.read_raw_fif(row.raw_fname, preload=True) ecgPos_idx=info.ch_names.index('ECG+') ecgNeg_idx=info.ch_names.index('ECG-') veogPos_idx=info.ch_names.index('VEOG+') veogNeg_idx=info.ch_names.index('VEOG-') heogPos_idx=info.ch_names.index('HEOG+') heogNeg_idx=info.ch_names.index('HEOG-') ecg=raw._data[ecgPos_idx,:]-raw._data[ecgNeg_idx,:] veog=raw._data[veogPos_idx,:]-raw._data[veogNeg_idx,:] heog=raw._data[heogPos_idx,:]-raw._data[heogNeg_idx,:] raw._data[ecgPos_idx,:]=ecg raw._data[veogPos_idx,:]=veog raw._data[heogPos_idx,:]=heog raw.rename_channels({raw.ch_names[ecgPos_idx]:'ECG'}) raw.rename_channels({raw.ch_names[veogPos_idx]:'VEOG'}) raw.rename_channels({raw.ch_names[heogPos_idx]:'HEOG'}) raw.drop_channels(raw.ch_names[ecgNeg_idx]) raw.drop_channels(raw.ch_names[veogNeg_idx]) raw.drop_channels(raw.ch_names[heogNeg_idx]) return raw def assess_ICA_spectral_properties(current_dframe): '''Loop over all 
datasets and return ICA metrics''' current_dframe.reset_index(inplace=True) #Load first dataset to allocate size to the dataframe raw = mne.io.read_raw_fif(current_dframe.iloc[0]['raw_fname']) ch_names = get_consistent_ch_names(current_dframe) ica = mne.preprocessing.read_ica(current_dframe.iloc[0]['ica_filename']) ica_timeseries = ica.get_sources(raw, start=0, stop=100*raw.info['sfreq']) comp_num, samples = ica_timeseries._data.shape freqs, _ = welch(ica_timeseries._data, fs=raw.info['sfreq']) spectra_dframe = pd.DataFrame(np.zeros([comp_num*len(current_dframe), len(freqs)]), columns = freqs) spectra_dframe['kurtosis'] = 0 for index,row in current_dframe.iterrows(): print(index) veog_ch, heog_ch, ekg_ch = row[['veog', 'heog', 'ekg']] ica = mne.preprocessing.read_ica(row['ica_filename']) component = ica.get_components() if row.distribution == 'HCP': raw = calc_hcp_bipolar(row) else: raw = mne.io.read_raw_fif(row['raw_fname'], preload=True) ch_indexs = set(raw.ch_names).intersection(ch_names) # raw = mne.io.read_raw_fif(row['raw_fname'], preload=True) ica_timeseries = ica.get_sources(raw, start=0, stop=100*raw.info['sfreq']) freqs, power = welch(ica_timeseries._data, fs=raw.info['sfreq']) log_power = np.log(power) spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), freqs]=log_power spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'kurtosis'] = kurtosis(ica_timeseries._data, axis=1) spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'component_num']= range(comp_num) spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'subjid'] = row['subjid'] try : bads_ecg=ica.find_bads_ecg(raw, ch_name=ekg_ch, method='correlation')[1] spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_corr'] = bads_ecg except: spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_corr'] = np.NaN try: bads_ecg_ctps = ica.find_bads_ecg(raw, ch_name=ekg_ch, method='ctps')[1] spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_ctps'] = bads_ecg_ctps except: spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'ecg_bads_ctps'] = np.NaN try: bads_veog = ica.find_bads_eog(raw, ch_name=veog_ch)[1] spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'veog_bads_corr'] = bads_veog except: spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'veog_bads_corr'] = np.NaN try: bads_heog = ica.find_bads_eog(raw, ch_name=heog_ch)[1] spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'heog_bads_corr'] = bads_heog except: spectra_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'heog_bads_corr'] = np.NaN # veog_corr[index*comp_num:(index*comp_num + comp_num)] = ica.find_bads_eog(raw, ch_name=veog_ch)[1] # heog_corr[index*comp_num:(index*comp_num + comp_num)] = ica.find_bads_eog(raw, ch_name=heog_ch)[1] return spectra_dframe def plot_topo_hack(normalized_topo): ref_sens = mne.io.read_raw_fif('/fast/ICA/CAMCAN/sub-CC621184_ses-rest_task-rest_proc-sss_300srate.fif') ref_sens.crop(0, 2) ref_sens.pick_types(meg='mag') ref_sens.load_data() epochs = mne.make_fixed_length_epochs(ref_sens) evoked = epochs.average() if normalized_topo.shape.__len__() == 1: evoked._data[:,0]=normalized_topo evoked.plot_topomap(times=evoked.times[0], colorbar=False) else: evoked._data[:,:25]=normalized_topo evoked.plot_topomap(times=evoked.times[0:25], ncols=5, nrows=5, colorbar=False) def assess_ICA_topographic_properties(current_dframe): '''Loop over all datasets and return ICA metrics''' 
ref_sens = mne.io.read_raw_fif('/fast/ICA/CAMCAN/sub-CC621184_ses-rest_task-rest_proc-sss_300srate.fif', preload=True) ref_sens.pick_types(meg='mag') ref_ica_fname = '/fast/ICA/CAMCAN/sub-CC621184_ses-rest_task-rest_proc-sss_0-ica.fif' ref_ica = mne.preprocessing.read_ica(ref_ica_fname) current_dframe.reset_index(inplace=True, drop=True) ica = mne.preprocessing.read_ica(current_dframe.iloc[0]['ica_filename']) _, comp_num = ica.get_components().shape #_timeseries._data.shape topo_dframe = pd.DataFrame(np.zeros([comp_num*len(current_dframe), 102]), columns = range(102)) for index,row in current_dframe.iterrows(): print(index) veog_ch, heog_ch, ekg_ch = row[['veog', 'heog', 'ekg']] ica = mne.preprocessing.read_ica(row['ica_filename']) component = ica.get_components() convert_to_ref=mne.forward._map_meg_or_eeg_channels(ica.info, ref_sens.info, # reference_sens.info, 'accurate', (0., 0., 0.04)) normalized_topo = convert_to_ref @ component mins_= normalized_topo.min(axis=0) maxs_ = normalized_topo.max(axis=0) standardized_topo = 2 * (normalized_topo - mins_ ) / (maxs_ - mins_) - 1 topo_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), range(102)]=standardized_topo.T topo_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'component_num']= range(comp_num) topo_dframe.loc[index*comp_num:(index*comp_num + comp_num-1), 'subjid'] = row['subjid'] return topo_dframe # Make an input dataframe of paths dframe = populate_dframe() # Loop over ICA filepaths to save out csv files for dist in dframe.distribution.unique(): current_dframe = dframe[dframe.distribution==dist] out_dframe = assess_ICA_properties(current_dframe) out_dframe.to_csv(f'/fast/ICA/Spectra_{dist}.tsv', sep='\t', index=None) # Compile csv files into larger dataframe again dlist = [] for repo in ['CAMCAN', 'HCP','MOUS','NIH_HV']: if 'tmp' in locals().keys() : del tmp tmp =
pd.read_csv(f'Spectra_{repo}.tsv', sep='\t')
pandas.read_csv
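# Self-contained sketch of the pandas.read_csv call completed above: each
# repository's spectra table was written earlier with to_csv(..., sep='\t'),
# and this loop reads the four TSV files back. Stacking them afterwards with
# pd.concat is an assumption about what the truncated loop does next.
import pandas as pd

frames = []
for repo in ["CAMCAN", "HCP", "MOUS", "NIH_HV"]:
    frames.append(pd.read_csv(f"Spectra_{repo}.tsv", sep="\t"))
spectra = pd.concat(frames, ignore_index=True)
print(spectra.shape)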
from .static.Qua_config import * from .Qua_assisFunc import * import pandas as pd import numpy as np from .main_match import main_match # function 1 def GetAllIdType(StudentList): StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0]) id_col_name = ['身分別1','身分別2','身分別3','特殊身份別'] column = [str(x) for i in range(len(id_col_name)) for x in StudentList[id_col_name[i]].tolist() if str(x)!='None'] return sorted(list(set(column))) # function 2 def DivideDF(ordered_IdList, StudentList, DormList): StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0]) StudentList['學號'] = [str(i) for i in range(len(StudentList))] # TODO: remove! DormList = pd.DataFrame(DormList[1:], columns=DormList[0]) StudentList = StudentList.drop(columns = Ori_ColumnToBeDrop) BedNumDict = countBedNum(DormList) # get get_str2int id_dict = get_id_dict(ordered_IdList) # StudentList = get_str2int(id_dict, StudentList) # string contain id & willingness StudentList = get_id2int(id_dict, StudentList) StudentList = get_willing2int(StudentList) # divide in-out campus StudentList = StudentList.sort_values(by = '校內外意願').reset_index(drop = True) InCamNum = len(StudentList)-StudentList.groupby('校內外意願').count()['性別'][3] InCam_df = StudentList.iloc[:InCamNum,:] InCam_df = InCam_df.sort_values(by = '性別').reset_index(drop = True) InCam_df['資格'] = [2 if (row['id_index']==1 and row['是否需要安排身障房間']=='是') else 0 for index,row in InCam_df.iterrows()] # incampus divide boy-girl GirlInCamNum = InCam_df.groupby(['性別']).size()['女性'] GirlInCam = InCam_df.iloc[:GirlInCamNum,:].sort_values(by='id_index').reset_index(drop=True) BoyInCam = InCam_df.iloc[GirlInCamNum:,:].sort_values(by='id_index').reset_index(drop=True) # WaitDF WaitDF = StudentList.iloc[InCamNum:,:] # get qualification of boy&girl df GirlInCam = dealWithPreference(assign_qualificaiton(GirlInCam,BedNumDict)) BoyInCam = dealWithPreference(assign_qualificaiton(BoyInCam,BedNumDict)) GirlInCam = GirlInCam.sort_values(by='資格').reset_index(drop=True) BoyInCam = BoyInCam.sort_values(by='資格').reset_index(drop=True) # All-Wait DF QuaGirlGroup = GirlInCam.groupby('資格').count() NoQuaGirlNum = QuaGirlGroup['性別'][0] QuaBoyGroup = BoyInCam.groupby('資格').count() NoQuaBoyNum = QuaBoyGroup['性別'][0] WaitAllDf = [GirlInCam.iloc[:NoQuaGirlNum,:],BoyInCam.iloc[:NoQuaBoyNum],WaitDF] WaitDF = pd.concat(WaitAllDf) # Output Girl&Boy df GirlInCam = GirlInCam.iloc[NoQuaGirlNum:,:].drop(columns = AlgorithmNeedDrop).sort_values(by='id_index').reset_index(drop=True) BoyInCam = BoyInCam.iloc[NoQuaBoyNum:,:].drop(columns = AlgorithmNeedDrop).sort_values(by='id_index').reset_index(drop=True) GirlInCam['永久地址'] = Address2Nationality(GirlInCam['永久地址'],countryDict) BoyInCam['永久地址'] = Address2Nationality(BoyInCam['永久地址'],countryDict) # organize Wait df WaitDF = WaitDF.drop(columns=Wait_Drop) return BoyInCam, GirlInCam, WaitDF def list2df(beds): columns = beds[0] data = beds[1:] df = pd.DataFrame(data, columns = beds[0]) return df def Match(BoyInQua, GirlInQua, beds): beds_df = list2df(beds) BoyInQua, GirlInQua = main_match(BoyInQua, GirlInQua, beds_df) return BoyInQua, GirlInQua # function4 def GetOutputDF(id_orderList, BoyQua, GirlQua, StudentList, WaitDF): # BoyQua = pd.DataFrame(BoyQua[1:], columns=BoyQua[0]) # GirlQua = pd.DataFrame(GirlQua[1:], columns=GirlQua[0]) StudentList = pd.DataFrame(StudentList[1:], columns=StudentList[0]) StudentList['學號'] = [str(i) for i in range(len(StudentList))] # TODO: remove! 
# WaitDF = pd.DataFrame(WaitDF[1:], columns=WaitDF[0]) # Divide WaitDF => campus,BOT WaitDF = WaitDF.sort_values('校內外意願') WillGroupNum = WaitDF.groupby('校內外意願') CampusNum = WillGroupNum.count()['性別'][1] + WillGroupNum.count()['性別'][2] NotBotNum = len(WaitDF) - WillGroupNum.count()['性別'][2] - WillGroupNum.count()['性別'][3] Campus = WaitDF.iloc[:CampusNum,:].drop(columns = CampusWait_Drop_AsQua).sort_values('性別') Bot = WaitDF.iloc[NotBotNum:,:] # organize Campus Campus['資格'] = [0 for i in range(len(Campus))] CampusGirlNum = Campus.groupby('性別')['性別'].count().tolist()[0] CampusBoy = OrderAssign(Campus.iloc[CampusGirlNum:]) CampusGirl = OrderAssign(Campus.iloc[:CampusGirlNum]) BoyQua['順位序號'] = [0 for i in range(len(BoyQua))] GirlQua['順位序號'] = [0 for i in range(len(GirlQua))] CampusBoy =
pd.concat([BoyQua,CampusBoy])
pandas.concat
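# Toy sketch of the pandas.concat call completed above: the qualified list and
# the campus waiting list share the same columns, so they are stacked row-wise.
# The column names come from the script, but the values are placeholders, not
# real student records.
import pandas as pd

boy_qua = pd.DataFrame({"學號": ["s1", "s2"], "順位序號": [0, 0]})
campus_boy = pd.DataFrame({"學號": ["s3", "s4"], "順位序號": [1, 2]})
campus_boy_all = pd.concat([boy_qua, campus_boy], ignore_index=True)
print(campus_boy_all)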
import geopandas as gpd import pandas as pd import urllib.request, json import geopandas as gpd import unicodedata def normalize_str(s): """ Function for name normalization (handle áéíóú) """ if type(s)==float: assert math.isnan(s) return '' return unicodedata.normalize("NFKD", s).encode("ascii","ignore").decode("ascii").upper().lstrip().rstrip() def gen_info(): """ Generate the DataFrames with meta info (covidstats id, population and geografical) """ # Carga la metainfo de departamentos de covidstas y filtramos departamentos de Santa Fe covidstats_meta_df = pd.read_csv('covidstats.csv',sep=';') covidstats_meta_df['LOCATION']='ARGENTINA/'+covidstats_meta_df['Provincia'].apply(normalize_str)+'/'+covidstats_meta_df['Departamento'].apply(normalize_str) covidstats_meta_df=covidstats_meta_df[covidstats_meta_df['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))] covidstats_meta_df # Cargamos la info poblacional y chequemos que tengamos toda la info info_df=pd.read_csv('info_general.csv') s = set(info_df['LOCATION']) for l in set(covidstats_meta_df['LOCATION']): if l not in s: print('FALTA INFO DE: {}'.format(l)) # Cargamos la info geografica y chequemos que tengamos toda la info gdf = gpd.read_file('maps_general.geojson') gdf=gdf[gdf['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))] s = set(gdf['LOCATION']) for l in set(covidstats_meta_df['LOCATION']): if l not in s: print('FALTA INFO GEOGRAFICA DE: {}'.format(l)) return covidstats_meta_df, info_df, gdf def get_data(department_id): """ Dado un id de departamento de covidstats obtiene los datos """ url='https://covidstats.com.ar/ws/evolucion?comprimido=1&departamentos[]={}' with urllib.request.urlopen(url.format(department_id)) as req: return json.loads(req.read().decode()) COLUMNS = ['dias_confirmacion_fallecimiento', 'terapia', 'casos_con_fis', 'casos_dx', 'fallecidos_reporte', 'dias_diagnostico_terapia', 'dias_diagnostico_fallecimiento', 'casos_fis_ajustada', 'diagnosticos', 'casos_fa', 'casos_reporte', 'internados', 'sospechosos_fa', 'fallecidos_fa', 'casos_reporte_sin_pcr', 'dias_confirmacion_diagnostico', 'casos_fis', 'fallecidos', 'dias_apertura_diagnostico', 'dias_fis_diagnostico'] COLUMNS_PEOPLE = ['terapia', 'casos_con_fis', 'casos_dx', 'fallecidos_reporte', 'casos_fis_ajustada', 'diagnosticos', 'casos_fa', 'casos_reporte', 'internados', 'sospechosos_fa', 'fallecidos_fa', 'casos_reporte_sin_pcr', 'casos_fis', 'fallecidos'] def covidstats_df(data): """ Dado un json de datos de un deparamento de covidstats devuelve un DataFrame con: - indice la fecha - casos_fa, casos_reporte - CAMPO_acum el acumulado respectivo - CAMPO_14acum acumulado en 14 dias """ day = pd.Series(pd.date_range(data['fecha_inicial'], freq="D", periods=len(data['casos_fa']))) df=pd.DataFrame(index=day, columns=COLUMNS,data=zip(*[data[c] for c in COLUMNS])) df=df.sort_index() for c in COLUMNS: df[c+'_acum']=df[c].cumsum() df[c+'_14acum']=df[c+'_acum'].diff(14) return df def covidstats_dfs(covidstats_meta_df): """ Dado un dataframe con LOCATIO, ID departamento CovidStats construye las series respectivas de toda region """ dfs=[] for _,r in covidstats_meta_df.iterrows(): location=r['LOCATION'] print('Procesando {}'.format(location)) data = get_data(r['ID departamento CovidStats']) assert data['fecha_inicial']=='2020-01-01T00:00:00-03:00' df = covidstats_df(data) df['LOCATION']=location dfs.append(df) return
pd.concat(dfs)
pandas.concat
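covidstats_df above derives, for every field, a cumulative column (cumsum) and a 14-day accumulated column (diff(14) on the cumulative series). Below is a small self-contained check on invented daily counts, showing that this construction is the same as a plain 14-day rolling sum.

import numpy as np
import pandas as pd

# Invented daily counts, standing in for one of the covidstats fields.
days = pd.date_range('2020-01-01', periods=30, freq='D')
daily = pd.Series(np.random.default_rng(0).integers(0, 20, size=30), index=days)

acum = daily.cumsum()      # running total, like df[c + '_acum']
acum_14 = acum.diff(14)    # like df[c + '_14acum']

# diff(14) on the cumulative series equals a 14-day rolling sum of the daily values.
rolling_14 = daily.rolling(14).sum()
gap = (acum_14 - rolling_14).dropna()
assert (gap.abs() < 1e-9).all()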
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import matplotlib.transforms as mtrans

from .lib import quantile_ied, CI_estimate


def aleplot_1D_continuous(X, model, feature, grid_size=20, include_CI=True, C=0.95):
    """Compute the accumulated local effect of a numeric continuous feature.

    This function divides the feature in question into grid_size intervals (bins)
    and computes the difference in prediction between the first and last value
    of each interval and then centers the results.

    Arguments:
    X -- A pandas DataFrame to pass to the model for prediction.
    model -- Any python model with a predict method that accepts X as input.
    feature -- String, the name of the column holding the feature being studied.
    grid_size -- An integer indicating the number of intervals into which the
    feature range is divided.
    include_CI -- A boolean, if True the confidence interval of the effect is
    returned with the results.
    C -- A float the confidence level for which to compute the confidence interval.

    Return: A pandas DataFrame containing for each bin: the size of the sample in
    it and the accumulated centered effect of this bin.
    """
    quantiles = np.append(0, np.arange(1 / grid_size, 1 + 1 / grid_size, 1 / grid_size))
    # use customized quantile function to get the same result as
    # type 1 R quantile (Inverse of empirical distribution function)
    bins = [X[feature].min()] + quantile_ied(X[feature], quantiles).to_list()
    bins = np.unique(bins)
    feat_cut =
pd.cut(X[feature], bins, include_lowest=True)
pandas.cut
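aleplot_1D_continuous above bins the feature by quantile edges, and the completion assigns each row to its interval with pandas.cut. The sketch below reproduces just that binning step on invented data; the package's quantile_ied (type-1 R quantile) is approximated here with numpy's default quantile, so the edges may differ slightly from the library's.

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
X = pd.DataFrame({"x1": rng.normal(size=200)})   # invented feature values
grid_size = 5

# Quantile-based bin edges (an approximation of the quantile_ied construction).
quantiles = np.linspace(0, 1, grid_size + 1)
bins = np.unique(np.quantile(X["x1"], quantiles))

# Assign every observation to its interval, as in the completion above.
feat_cut = pd.cut(X["x1"], bins, include_lowest=True)
print(feat_cut.value_counts().sort_index())      # roughly equal-sized bins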
# import pandas and numpy, and load the covid data
import pandas as pd
import numpy as np

pd.set_option('display.width', 200)
pd.set_option('display.max_columns', 35)
pandas.set_option
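The row above only widens the console display. As a small aside (not part of the original snippet), the same options can be read back with pandas.get_option or applied temporarily with pandas.option_context:

import pandas as pd

pd.set_option('display.width', 200)
pd.set_option('display.max_columns', 35)
print(pd.get_option('display.max_columns'))      # -> 35

# Scoped alternative: the options revert when the block exits.
with pd.option_context('display.width', 200, 'display.max_columns', 35):
    print(pd.get_option('display.width'))        # -> 200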
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Combine raw tweets data, per hour, into single CSV file.""" # pylint: disable=invalid-name,too-many-locals,too-many-arguments import os from datetime import datetime from io import StringIO from typing import Dict, List, Union import boto3 import pandas as pd def get_objects_in_one_s3_level( s3b_name: str, content: Union[Dict, str], region: str ) -> Dict: """Get list of all storage objects in single S3 level.""" s3_client = boto3.client("s3", region_name=region) # Get path to hourly sub-folders within each daily folder on S3 bucket prefix = content if isinstance(content, str) else content.get("Prefix") # Get list of all objects in all hourly sub-folders # - each list of is a list of dictionaries, where each dict contains keys: # - Key, LastModified, ETag, Size, StorageClass response_new = s3_client.list_objects_v2( Bucket=s3b_name, Prefix=prefix, Delimiter="/" ) return response_new def get_data_metadata(file: str, s3_bucket_name: str, region: str) -> Dict: """Extract data and file metadata from raw tweets data.""" s3_client = boto3.client("s3", region_name=region) # Get File body (decoded contents) from file dictionary file_body = s3_client.get_object( Bucket=s3_bucket_name, Key=file.get("Key") )["Body"].read() # Get File name from file dictionary file_name = os.path.basename(file["Key"]) return {"file_body": file_body, "file_name": file_name} def get_attrs_extracted_from_tweet_text( row: pd.Series, attr_type: str = "hashtags" ) -> str: """Get attrs (hashtags or usernames) extracted from tweet text.""" # Get extracted attribute (tweet_text_hashtags or tweet_text_usernames) # from each tweet (row of a pandas DataFrame) # - attributes will be the '|' separated string extracted = str(row[f"tweet_text_{attr_type}"]) # Split the string by the pipe operator ('|') to give a single string of # space-separated attributes extracted_separated = ( " " + extracted.replace("|", " ") if str(extracted) != "nan" else "" ) # print( # row.name, # type(extracted_separated), # extracted_separated, # f"extracted_{attr_type}={extracted_separated}", # ) return extracted_separated def get_datetime_string() -> str: """Generate current timestamp as string.""" return datetime.now().strftime("%Y%m%d%H%M%S") def get_hourly_data_metadata( data_list: List, headers: List, fpath: str, cols_to_use: List[str], unwanted_partial_strings_list: List[str], combine_hashtags_usernames: bool = False, get_metadata_agg: bool = False, ) -> List[pd.DataFrame]: """Load raw tweets data and file metadata into DataFrames.""" year, month, day, hour = fpath.split("/", 3)[-1].split("/", 3) dfs = [] dfs_metadata = [] # Loop over list of dictionaries, where each dict corresponds to a # separate file and contains keys: file_name, file_body (file contents) for k, raw_data_contents in enumerate(data_list): # Decode file contents and split by \n giving nested list # - each sub-list is a single tweet and its metadata single_buffer_data_strings = ( raw_data_contents["file_body"].decode("utf-8").split("\n") ) # Iterate over nested list all_buffer_contents = [] for q, data_string in enumerate(single_buffer_data_strings): if data_string: # split each sub-list by \t in order to get values for each # field values = data_string.strip().split("\t") # print( # k+1, # q+1, # len(raw_data_contents["file_body"]), # len(values), # len(values) != len(headers), # data_string, # ) # Append tweet metadata to dict dfs_metadata.append( { "file": k + 1, "file_name": raw_data_contents["file_name"], "encoded_length": 
len(raw_data_contents["file_body"]), "values_index": q + 1, "len_values": len(values), "malformed_values": len(values) != len(headers), "file_year": year, "file_month": month, "file_day": day, "file_hour": hour[:-1], } ) # Append tweet data to dict (if data is not malformed with # more fields than expected) if len(values) == len(headers): all_buffer_contents.append(values) # Convert nested list of tweet data into DataFrame and append raw data # filename as separate column df_row = pd.DataFrame(all_buffer_contents, columns=headers).assign( file_name=raw_data_contents["file_name"] ) # Append DataFrame of data to empty list dfs.append(df_row) # Vertically concatenate list of DFs of data in order to get single DF of # tweets retrieved per hour df = pd.concat(dfs, ignore_index=True) # Remove tweets with sensitive partial text that are clearly unrelated to # the specified search terms (this list was built up retrospectively) unwanted_partial_strings = "|".join(unwanted_partial_strings_list) df = df[~df["text"].str.lower().str.contains(unwanted_partial_strings)] # (Optional) Combine hashtags and usernames with tweet text (if not done, # then these will be completely excluded from tweet text if combine_hashtags_usernames: # Combine tweet text with space-separated hashtags and usernames that # were extracted from the text of the tweet # - eg. tweet text ('tweet text goes here') will be combined with # hashtags string ('hashtag1 hashtag2 hashtag3') and # user names string ('username1 username2 username3') for attribute_type in ["hashtags", "usernames"]: df[f"{attribute_type}_str"] = df.apply( get_attrs_extracted_from_tweet_text, axis=1, attr_type=attribute_type, ) df["text"] = df["text"] + df["hashtags_str"] + df["usernames_str"] # Slice vertically concatenated data to select required columns all_cols_to_use = cols_to_use + ["file_name"] df = df[all_cols_to_use].dropna() # Vertically concatenate list of DFs of metadata in order to get single DF # of tweets metadata per hour # - metadata will not be filtered so that we have access to statistics # about the raw data that was streamed df_metadata = pd.DataFrame.from_records(dfs_metadata) # (optional) Aggregate metadata by raw data file name if get_metadata_agg: df_metadata_agg = ( df_metadata.groupby(["file_name"], as_index=False) .agg( { "encoded_length": "min", "values_index": "max", "len_values": "min", "malformed_values": "sum", } ) .assign(num_valid_records=len(df)) ) else: # if no aggregation wanted, return empty DataFrame df_metadata_agg = pd.DataFrame() return [df, df_metadata, df_metadata_agg] def create_folder_in_s3_bucket( region: str, s3_bucket_name: str, folder_name: str = "csvs" ) -> None: """Create folder in S3 bucket, if it does not exist.""" s3_client = boto3.client("s3", region_name=region) # List all objects in S3 bucket that are inside the CSVs/ sub-folder folders_response_result = s3_client.list_objects_v2( Bucket=s3_bucket_name, Prefix=f"datasets/twitter/kinesis-demo/{folder_name}", Delimiter="/", ) # Create object (with no body), which will result in an empty folder # (if a folder of the same name is not already present in the CSVs/ # sub-folder) if "CommonPrefixes" in folders_response_result: print( f"Found existing folder {folder_name} in specified S3 bucket. " "Did nothing." 
) else: proc_data_folder_creation_response = s3_client.put_object( Bucket=s3_bucket_name, Body="", Key=f"datasets/twitter/kinesis-demo/{folder_name}/", ) print(f"Created folder {folder_name} in bucket.") def get_existing_csv_files_list( s3_bucket_name: str, region: str, prefix: str ) -> List[str]: """Get list of files in subfolder in S3 bucket, by filename prefix.""" s3_resource = boto3.resource("s3", region_name=region) bucket = s3_resource.Bucket(s3_bucket_name) # Get list of objects containing user-specified prefix in filename files_found_objects_list = list(bucket.objects.filter(Prefix=prefix)) # For each object, get dictionary of file attributes under .key attribute # and store these dictionaries in a list files_found_names_list = [w.key for w in files_found_objects_list] return files_found_names_list def save_df_to_csv_on_s3( df: pd.DataFrame, bucket_name: str, filepath: str, region: str, df_type: str = "metadata", ) -> None: """Export DataFrame as CSV to folder in S3 bucket.""" s3_client = boto3.client("s3", region_name=region) # Prepare DataFrame for export to an S3 bucket # - https://stackoverflow.com/a/58636316/4057186 csv_buf = StringIO() df.to_csv(csv_buf, header=True, index=False) csv_buf.seek(0) # Add CSV to bucket under the specified filepath (in this case, under # the CSVs/ sub-folder) s3_client.put_object( Bucket=bucket_name, Body=csv_buf.getvalue(), Key=filepath ) print(f"- Copied {len(df):,} rows of {df_type} to bucket at {filepath}.") def save_data_and_metadata_to_s3_csv( subfolder_path: str, existing_csv_files: List[str], s3_bucket_name: str, headers: List[str], content: Dict, path_to_csvs_folder: str, region: str, cols_to_use: List[str], unwanted_partial_strings_list: List[str], combine_hashtags_usernames: bool = False, aggregate_metadata: bool = False, ) -> None: """Extract tweets data and metadata and export to csvs/ in S3 bucket.""" # Concatenate year, month, day and hour specified by subfolder_path # - 'datasets/twitter/kinesis-demo/2021/12/30/17/' becomes 'hc2021123017' ctime_str = "hc" + subfolder_path.split("/", 3)[-1].rstrip("/").replace( "/", "" ) # Get list of hourly CSVs of data and metadata that already exist in # csvs/ sub-folder in S3 bucket existing_matching_csv_files = [ f for f in existing_csv_files if ctime_str in f ] # Get string with current datetime ftime_str = "s" + get_datetime_string() # For the given subfolder path, if no hourly CSVs of data and metadata # exists in S3 bucket under the CSVs/ sub-folder, then extract these as # pandas DFs and export each # - i.e. if the above list (existing_matching_csv_files) is not empty, # then export data and metadata, else do nothing if not existing_matching_csv_files: # Get list of dictionaries (with file name and file body) in S3 bucket data_list = [ get_data_metadata(file_name, s3_bucket_name, region) for file_name in get_objects_in_one_s3_level( s3_bucket_name, content, region, )["Contents"] ] # Get DFs of tweets data and metadata from file attributes in list of # dicts df, df_metadata, _ = get_hourly_data_metadata( data_list, headers, content.get("Prefix"), cols_to_use, unwanted_partial_strings_list, combine_hashtags_usernames, aggregate_metadata, ) # Change datetime format in DF of data for c in ["created_at", "user_joined"]: df[c] =
pd.to_datetime(df[c])
pandas.to_datetime
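get_hourly_data_metadata above splits each decoded record on tabs, flags rows whose field count does not match the headers, and later converts the timestamp columns with pandas.to_datetime. Below is a compact sketch of that parse-and-flag loop; the three-column header list and the records are invented stand-ins for the real tweet schema.

import pandas as pd

headers = ["id", "created_at", "text"]           # assumed, simplified header list
raw_records = [                                  # invented tab-separated tweets
    "1\t2021-12-30 17:01:02\thello world",
    "2\t2021-12-30 17:02:03\tanother tweet",
    "3\t2021-12-30 17:03:04\tbad\trow\twith\textra\tfields",
]

rows, malformed = [], 0
for rec in raw_records:
    values = rec.strip().split("\t")
    if len(values) == len(headers):              # keep only well-formed records
        rows.append(values)
    else:
        malformed += 1

df = pd.DataFrame(rows, columns=headers)
df["created_at"] = pd.to_datetime(df["created_at"])   # as in the completion
print(df.dtypes, "malformed:", malformed)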
# ***************************************************************************** # © Copyright IBM Corp. 2018. All Rights Reserved. # # This program and the accompanying materials # are made available under the terms of the Apache V2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 # # ***************************************************************************** import logging from collections import OrderedDict import pandas as pd from .exceptions import MergeException from .ui import UIMultiItem, UISingle from .util import get_index_names, reset_df_index, UNIQUE_EXTENSION_LABEL logger = logging.getLogger(__name__) class AggregateItems(object): """ Use common aggregation methods to aggregate one or more data items """ is_system_function = True _allow_empty_df = False produces_output_items = True is_simple_aggregator = True granularity = None _input_set = None def __init__(self, input_items, aggregation_function, output_items=None): super().__init__() self.input_items = input_items self.aggregation_function = aggregation_function if output_items is None: output_items = ['%s_%s' % (x, aggregation_function) for x in self.input_items] self.output_items = output_items self._output_list = [] self._output_list.extend(self.output_items) def __str__(self): out = self.__class__.__name__ try: out = out + ' at granularity ' + str(self.granularity) except AttributeError: out = out + ' unknown granularity' if self.input_items is not None: out = out + ' requires inputs %s' % self.input_items else: out = out + ' required inputs not evaluated yet' if self._output_list is not None: out = out + ' produces outputs %s' % self._output_list else: out = out + ' outputs produced not evaluated yet' try: out = out + ' on schedule ' + str(self.schedule) except AttributeError: out = out + ' unknown schedule' return out def get_aggregation_method(self): # Aggregation methods may either be strings like 'sum' or 'count' or class methods # get_available_methods returns a dictionary that converts aggregation method names to class names when needed methods = self.get_available_methods() out = methods.get(self.aggregation_function, None) if out is None: raise ValueError('Invalid aggregation function specified: %s' % self.aggregation_function) return out def get_input_set(self): out = set(self.input_items) gran = self.granularity if gran is not None: out |= set(gran.dimensions) else: raise ValueError('Aggregate function %s has no granularity' % self.aggregation_function) return out @classmethod def build_ui(cls): inputs = [] inputs.append(UIMultiItem(name='input_items', datatype=None, description='Choose the data items that you would like to aggregate', output_item='output_items', is_output_datatype_derived=True)) aggregate_names = list(cls.get_available_methods().keys()) inputs.append( UISingle(name='aggregation_function', description='Choose aggregation function', values=aggregate_names)) return (inputs, []) @classmethod def count_distinct(cls, series): return len(series.dropna().unique()) @classmethod def get_available_methods(cls): return {'sum': 'sum', 'count': 'count', 'count_distinct': cls.count_distinct, 'min': 'min', 'max': 'max', 'mean': 'mean', 'median': 'median', 'std': 'std', 'var': 'var', 'first': 'first', 'last': 'last', 'product': 'product'} class DataAggregator(object): """ Default simple aggregation stage. 
Parameters: ----------- granularity: Granularity object agg_dict: dict Pandas aggregation dictionary complex_aggregators: list List of AS complex aggregation functions AS aggregation functions have an execute method that can be called inside of a pandas apply() on a groupby() to create a dataframe or series """ is_system_function = True _allow_empty_df = False _discard_prior_on_merge = True produces_output_items = True is_data_aggregator = True def __init__(self, name, granularity, agg_dict, input_items, output_items, complex_aggregators=None): self.name = name self._agg_dict = agg_dict self.granularity = granularity if complex_aggregators is None: complex_aggregators = [] self._complex_aggregators = complex_aggregators self.input_items = input_items self.output_items = output_items self._input_set = set(self.input_items) self._output_list = self.output_items def __str__(self): msg = 'Aggregator: %s with granularity: %s. ' % (self.name, self.granularity.name) for key, value in list(self._agg_dict.items()): msg = msg + ' Aggregates %s using %s .' % (key, value) for s in self._complex_aggregators: msg = msg + ' Uses %s to produce %s .' % (s.name, s._output_list) return msg def execute(self, df=None): gfs = [] group = df.groupby(self.granularity.grouper) if not self._agg_dict is None and self._agg_dict: gf = group.agg(self._agg_dict) gfs.append(gf) for s in self._complex_aggregators: gf = group.apply(s.execute) gfs.append(gf) df = pd.concat(gfs, axis=1) df.columns = self.output_items logger.info('Completed aggregation: %s', self.granularity.name) return df class DataMerge(object): """ A DataMerge object combines the results of execution of a stage with the results of execution of the previous stages. By default, a DataMerge object initializes itself with an empty dataframe. Although the main purpose of the DataMerge object is maintaining a dataframe, it can also keep track of any constants and dimension lookups required added during job processing so that it can re-apply constants if needed. Use the execute method to combine a new incoming data object with whatever data is present in the DataMerge at the time. """ is_system_function = True r_suffix = UNIQUE_EXTENSION_LABEL def __init__(self, name=None, df=None, **kwargs): if name is None: name = self.__class__.__name__ self.name = name if df is None: df = pd.DataFrame() self.df = df self.constants = kwargs.get('constants', None) self.df_dimension = kwargs.get('df_dimension', None) if self.constants is None: self.constants = {} def __str__(self): out = ('DataMerge object has data structures: dataframe with %s rows' ' and %s constants ' % (len(self.df.index), len(self.constants))) return out def add_constant(self, name, value): """ Register a constant provide a value. Apply the constant to the dataframe. """ self.constants[name] = value self.df[name] = value def apply_constants(self): """ Apply the values of all constants to the dataframe. """ for name, value in list(self.constants.items()): self.df[name] = value def clear_data(self): """ Clear dataframe and constants """ self.constants = {} self.df = pd.DataFrame() def coalesce_cols(self, df, suffix): """ Combine two variants of the same column into a single. Variants are distinguished using a suffix, e.g. 'x' and 'x_new_' will be combined if the suffix of '_new_' is used. The coalesced result will be placed in column 'x' and will contain 'x_new' where a value of 'x_new' was provided and 'x' where the value of 'x_new' was null. 
""" altered = [] for i, o in enumerate(df.columns): try: drop = "%s%s" % (o, suffix) df[o] = df[o].fillna(df[drop]) altered.append(drop) except KeyError: pass if len(altered) > 0: cols = [x for x in list(df.columns) if x not in altered] df = df[cols] return df def convert_to_df(self, obj, col_names, index): df =
pd.DataFrame(data=obj, columns=col_names)
pandas.DataFrame
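DataAggregator above runs a pandas aggregation dictionary through groupby().agg() and sends "complex" aggregators through groupby().apply(), then concatenates the results. Below is a minimal sketch of the same two-path pattern; count_distinct mirrors the AggregateItems class method, while the device/temp/site frame is invented toy data.

import pandas as pd

def count_distinct(series):
    # mirrors AggregateItems.count_distinct
    return len(series.dropna().unique())

df = pd.DataFrame({
    "device": ["a", "a", "b", "b", "b"],         # invented sample data
    "temp":   [20.0, 21.0, 19.0, 19.5, None],
    "site":   ["x", "y", "y", "y", "x"],
})

group = df.groupby("device")
simple = group.agg({"temp": "mean"})             # the agg_dict path
complex_ = group["site"].apply(count_distinct)   # the complex-aggregator path

out = pd.concat([simple, complex_], axis=1)
out.columns = ["temp_mean", "site_count_distinct"]
print(out)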
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.metrics import classification_report
# from sklearn.multiclass import OneVsRestClassifier

# Matplotlib Chinese font settings
# plt.rcParams['font.family'] = 'simhei'
# plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from rendering as a box in saved figures

food = pd.read_csv('../data/per_month_sale_and_risk.csv')

# Step 2. Data preprocessing
# Step 2.1 One-hot encode the start_time and place attributes
start_time = pd.get_dummies(food.开始时间)
place = pd.get_dummies(food.地区名)
# region + month + sales
data = pd.concat([place, start_time], axis=1)
data =
pd.concat([data, food['销售额']], axis=1)
pandas.concat
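The snippet one-hot encodes the start-time and region columns with pandas.get_dummies and stitches the pieces back together with pandas.concat. Below is a toy version of the same pipeline; the month/region/sales frame stands in for the real CSV, whose actual columns are 开始时间, 地区名 and 销售额.

import pandas as pd

food = pd.DataFrame({                # invented stand-in for the sales CSV
    "month":  ["2020-01", "2020-02", "2020-01"],
    "region": ["north", "south", "north"],
    "sales":  [120.0, 80.0, 95.0],
})

month_dummies = pd.get_dummies(food["month"])
region_dummies = pd.get_dummies(food["region"])

# region + month + sales, as in the snippet
data = pd.concat([region_dummies, month_dummies], axis=1)
data = pd.concat([data, food["sales"]], axis=1)
print(data)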
import pandas as pd

from for_coco.load_imgs import load_imgs


def make_image_df(dataset):
    """Make a pandas DataFrame of image information."""
    # Load the image information
    images = load_imgs(dataset)

    # Build one list per image-information column
    image_ids = [img['id'] for img in images]
    image_names = [img['file_name'] for img in images]
    widths = [img['width'] for img in images]
    heights = [img['height'] for img in images]

    image_df_dict = {
        'IMAGE_ID': image_ids,
        'IMAGE_NAME': image_names,
        'WIDTH': widths,
        'HEIGHT': heights
    }

    # Build the pandas DataFrame
    image_df =
pd.DataFrame(image_df_dict)
pandas.DataFrame
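make_image_df above collects per-image lists from COCO-style records and passes a dict of lists to pandas.DataFrame. Below is a minimal sketch with two invented image records; load_imgs and the real annotation file are not reproduced here.

import pandas as pd

# Invented COCO-style image records standing in for load_imgs(dataset).
images = [
    {"id": 1, "file_name": "img_001.jpg", "width": 640, "height": 480},
    {"id": 2, "file_name": "img_002.jpg", "width": 1024, "height": 768},
]

image_df = pd.DataFrame({
    "IMAGE_ID":   [img["id"] for img in images],
    "IMAGE_NAME": [img["file_name"] for img in images],
    "WIDTH":      [img["width"] for img in images],
    "HEIGHT":     [img["height"] for img in images],
})
print(image_df)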
from ast import literal_eval from os import PathLike from pathlib import Path import pickle from shutil import unpack_archive from typing import Any from typing import Dict from typing import Union import geopandas as gpd import momepy import networkx as nx import numpy as np import pandas as pd from shapely.geometry import Point from tqdm import tqdm import geopandas as gpd import pandas as pd import geopandas as gpd import numpy as np import pandas as pd from scipy.spatial import cKDTree def check_file_exists(product: PathLike, filepath: str): assert Path(filepath).exists(), f"Please upload {Path(filepath).name} to data/raw" def unzip_esb_cad_data(product: PathLike, upstream: Dict[str, PathLike]) -> None: unpack_archive( filename=upstream["check_esb_cad_data_is_uploaded"], extract_dir=Path(product).parent, ) def convert_mv_lv_data_to_parquet(product: Any, upstream: Dict[str, PathLike]) -> None: dublin_mv_index = pd.read_csv(upstream["download_dublin_mv_index"], squeeze=True) dirpath = Path(upstream["unzip_esb_cad_data"]) / "Dig Request Style" / "MV-LV Data" network = [gpd.read_file(dirpath / f"{id}.dgn") for id in dublin_mv_index] mv_lv_network = pd.concat(network) # set coordinate reference system to irish grid mv_lv_network.crs = "EPSG:29903" # convert to irish transverse mercator mv_lv_network.to_crs(epsg=2157).to_parquet(product) def _convert_dataframe_to_geodataframe( df: pd.DataFrame, x: str, y: str, from_crs: str, to_crs: str = "EPSG:2157", ) -> gpd.GeoDataFrame: return gpd.GeoDataFrame( df, geometry=gpd.points_from_xy(df[x], df[y], crs=from_crs) ).to_crs(to_crs) def extract_dublin_substations(upstream: Any, product: Any) -> None: substations = pd.read_csv(upstream["download_esb_substation_capacities"]).pipe( _convert_dataframe_to_geodataframe, x="Longitude", y="Latitude", from_crs="EPSG:4326", to_crs="EPSG:2157", ) small_area_boundaries = gpd.read_file( str(upstream["download_dublin_small_area_boundaries"]) ).to_crs("EPSG:2157") dublin_substations = gpd.sjoin(substations, small_area_boundaries, op="within") dublin_substations.to_file(str(product), driver="GPKG") def extract_dublin_network_lines(upstream: Any, product: Any) -> None: network = gpd.read_parquet(upstream["convert_mv_lv_data_to_parquet"]).to_crs( epsg=2157 ) dublin_boundary = gpd.read_file(str(upstream["download_dublin_boundary"])).to_crs( epsg=2157 ) mv_network_lines = network.query("Level in [10, 11, 14]") dublin_mv_network_lines = mv_network_lines.overlay( dublin_boundary, how="intersection" ) # explode converts multi-part geometries to single-part which is req by networkx dublin_mv_network_lines.explode(ignore_index=True).to_file( str(product), driver="GPKG" ) def convert_network_lines_to_networkx(upstream: Any, product: Any) -> None: network = gpd.read_file( str(upstream["extract_dublin_network_lines"]), driver="GPKG" ).dropna(subset=["geometry"]) G = momepy.gdf_to_nx(network, approach="primal") G_dm = nx.DiGraph(G) with open(product, "wb") as f: pickle.dump(G_dm, f) def _join_nearest_points( gdA: gpd.GeoDataFrame, gdB: gpd.GeoDataFrame ) -> gpd.GeoDataFrame: nA = np.array(list(gdA.geometry.apply(lambda x: (x.x, x.y)))) nB = np.array(list(gdB.geometry.apply(lambda x: (x.x, x.y)))) btree = cKDTree(nB) _, idx = btree.query(nA, k=1) gdB_nearest = gdB.iloc[idx].reset_index(drop=True) return pd.concat( [ gdA.reset_index(drop=True).drop(columns="geometry"), gdB_nearest, ], axis=1, ) def find_nearest_nodes_to_stations_on_network( upstream: Any, product: Any, substation_type: str ) -> None: substations = ( 
gpd.read_file(str(upstream["extract_dublin_substations"])) .query("`Voltage Class` == @substation_type") .reset_index(drop=True) ) with open(upstream["convert_network_lines_to_networkx"], "rb") as f: G = pickle.load(f) nodes_as_points = gpd.GeoDataFrame( {"geometry": [Point(n) for n in G.nodes()]}, crs="EPSG:2157" ) nearest_node_points = _join_nearest_points( substations[["geometry"]], nodes_as_points ) nearest_node_ids = ( nearest_node_points.geometry.apply(lambda x: str(x.coords[0])) .rename("nearest_node_ids") .to_frame() ) nearest_node_ids.to_parquet(product) def calculate_path_lengths_along_network_between_substations( upstream: Any, product: Any ) -> None: with open(upstream["convert_network_lines_to_networkx"], "rb") as f: G = pickle.load(f) nearest_node_ids = ( pd.read_parquet(upstream["find_nearest_nodes_to_stations_on_network"]) .squeeze() .apply(literal_eval) # convert "(x,y)" to (x,y) as G uses tuples as keys ) dirpath = Path(product) dirpath.mkdir(exist_ok=True) unique_nearest_node_ids = nearest_node_ids.drop_duplicates() for i, origin in enumerate(tqdm(unique_nearest_node_ids)): individual_distances = [] for target in unique_nearest_node_ids: try: length = nx.dijkstra_path_length( G, source=origin, target=target, weight="length" ) except nx.NetworkXNoPath: length = np.inf individual_distances.append(length) all_distances =
pd.DataFrame({f"{i}": individual_distances})
pandas.DataFrame
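_join_nearest_points above matches each substation to its nearest network node with a scipy cKDTree and then concatenates the matched rows. The sketch below shows just the nearest-neighbour query on invented planar coordinates, using plain numpy arrays instead of GeoDataFrames.

import numpy as np
import pandas as pd
from scipy.spatial import cKDTree

# Invented planar coordinates: A = substations, B = candidate network nodes.
nA = np.array([[0.0, 0.0], [5.0, 5.0]])
nB = np.array([[0.1, -0.2], [4.0, 4.5], [9.0, 9.0]])

btree = cKDTree(nB)
dist, idx = btree.query(nA, k=1)     # nearest B index and distance for each A point

joined = pd.DataFrame({
    "a_x": nA[:, 0], "a_y": nA[:, 1],
    "nearest_b_x": nB[idx, 0], "nearest_b_y": nB[idx, 1],
    "distance": dist,
})
print(joined)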
# -*- coding: utf-8 -*- import geopandas as gpd import multiprocessing as mp import numpy as np import os import pandas as pd import re import seaborn as sns import sys import time from tqdm import tqdm from matplotlib import pyplot as plt import warnings from hs_process.utilities import defaults from hs_process.utilities import hsio from hs_process.segment import segment from hs_process.spec_mod import spec_mod from hs_process.spatial_mod import spatial_mod from concurrent.futures import ThreadPoolExecutor from concurrent.futures import as_completed class batch(object): ''' Class for batch processing hyperspectral image data. Makes use of `segment`_, `spatial_mod`_, and `spec_mod`_ to batch process many datacubes in a given directory. Supports options to save full datacubes, geotiff renders, as well as summary statistics and/or reports for the various tools. Note: It may be a good idea to review and understand the `defaults`_, `hsio`_, `hstools`_, `segment`_, `spatial_mod`_, and `spec_mod`_ classes prior to using the ``batch`` module. .. _defaults: hs_process.defaults.html .. _hsio: hs_process.hsio.html .. _hstools: hs_process.hstools.html .. _segment: hs_process.segment.html .. _spatial_mod: hs_process.spatial_mod.html .. _spec_mod: hs_process.spec_mod.html ''' def __init__(self, base_dir=None, search_ext='.bip', dir_level=0, lock=None, progress_bar=False): ''' Parameters: base_dir (``str``, optional): directory path to search for files to spectrally clip; if ``fname_list`` is not ``None``, ``base_dir`` will be ignored (default: ``None``). search_ext (``str``): file format/extension to search for in all directories and subdirectories to determine which files to process; if ``fname_list`` is not ``None``, ``search_ext`` will be ignored (default: 'bip'). dir_level (``int``): The number of directory levels to search; if ``None``, searches all directory levels (default: 0). lock (``multiprocessing.Lock``): Can be passed to ensure lock is in place when writing to a file during multiprocessing. ''' self.base_dir = base_dir self.search_ext = search_ext self.dir_level = dir_level self.lock = lock self.progress_bar = progress_bar self.fname_list = None if base_dir is not None: self.fname_list = self._recurs_dir(base_dir, search_ext, dir_level) self.io = hsio() self.my_spectral_mod = None self.my_spatial_mod = None self.my_segment = None def _try_spat_crop_col_key(self, key, df_row): ''' Gets value of ``key`` (column name) from ``df_row``; returns ``None`` if there is a KeyError This is tricky for crop_X and buf_X columns, because we must decipher whether to get these values from the default pool or not. If we get a KeyError, our first instinct is to gather the default, but we must check the "inverse" first (the "inverse" of crop_e_pix is crop_e_m) to avoid overwriting a value passed in df_row unintentionally. Therefore, this function handles keys differently if "crop" or "buf" are part of ``key`` than if they are not part of ``key`` Adds ``key`` to batch.io.defaults.spat_crop_cols if it does not yet exist, but then of course the ``value`` that is returned will be ``None`` ''' if key not in self.io.defaults.spat_crop_cols.keys(): print('Adding key "{0}" to defaults.spat_crop_cols dictionary' ''.format(key)) self.io.defaults.spat_crop_cols[key] = key try: value = df_row[self.io.defaults.spat_crop_cols[key]] except KeyError: # try to retrieve a default value # decide whehter to get default or not.. how? 
# check the inverse to see if it is accesible # try: # value = self.io.defaults.crop_defaults[key] # except KeyError: # value = None if 'crop' in key or 'buf' in key: key_base = key[:key.find('_', key.rfind('_'))] key_unit = key[key.find('_', key.rfind('_')):] if key_unit == '_m': key_unit_inv = '_pix' elif key_unit == '_pix': key_unit_inv = '_m' try: value_inv = df_row[self.io.defaults.spat_crop_cols[key_base+key_unit_inv]] # exists; set to NaN and carry on value = None except KeyError: # neither exist, gather default try: value = self.io.defaults.crop_defaults[key] except KeyError: value = None else: # proceed as normal try: value = self.io.defaults.crop_defaults[key] except KeyError: value = None # if key in ['crop_e_m', 'crop_n_m', 'crop_e_pix', 'crop_n_pix']: # print('Key: {0} Value: {1}'.format(key, value)) return value def _check_processed(self, fname_list, base_dir_out, folder_name, name_append, append_extra=None, ext=None): ''' Checks if any files in fname_list have already (presumably) undergone processing. This is determined by checking if a file exists with a particular name based on the filename in fname_list and naming parameters (i.e,. ``folder_name`` and ``name_append``). Parameters: ext (``str``): e.g., '.spec' ''' if append_extra is None: append_extra = '' fname_list_final = fname_list.copy() for fname in fname_list: if base_dir_out is None: base_dir = os.path.split(fname)[0] dir_out, name_append = self._save_file_setup( base_dir, folder_name, name_append) else: dir_out, name_append = self._save_file_setup( base_dir_out, folder_name, name_append) name_print = self._get_name_print(fname) if ext is None: name_label = (name_print + name_append + append_extra + '.' + self.io.defaults.envi_write.interleave) else: name_label = (name_print + name_append + append_extra + ext) if os.path.isfile(os.path.join(dir_out, name_label)): fname_list_final.remove(fname) msg1 = ('There are no files to process. Please check if files have ' 'already undergone processing. If existing files should be ' 'overwritten, be sure to set the ``out_force`` parameter.\n') msg2 = ('Processing {0} files. If existing files should be ' 'overwritten, be sure to set the ``out_force`` parameter.\n' ''.format(len(fname_list_final))) if not len(fname_list_final) > 0: warnings.warn(msg1, UserWarning, stacklevel=0) # else: # print(msg2) time.sleep(0.2) # when using progress bar, this keeps from splitting lines return fname_list_final def _crop_read_sheet(self, row): ''' Reads the necessary information from the spreadsheet and saves it to a dictionary If this function causes an error, try checking ``batch.io.defaults.spat_crop_col`` - these should be adjusted according to the default column names of the input (i.e., ``fname_sheet``). 
''' crop_specs = { 'directory': self._try_spat_crop_col_key('directory', row), 'fname': self._try_spat_crop_col_key('fname', row), 'name_short': self._try_spat_crop_col_key('name_short', row), 'name_long': self._try_spat_crop_col_key('name_long', row), 'ext': self._try_spat_crop_col_key('ext', row), 'plot_id_ref': self._try_spat_crop_col_key('plot_id_ref', row), 'pix_e_ul': self._try_spat_crop_col_key('pix_e_ul', row), 'pix_n_ul': self._try_spat_crop_col_key('pix_n_ul', row), 'alley_size_e_m': self._try_spat_crop_col_key('alley_size_e_m', row), 'alley_size_n_m': self._try_spat_crop_col_key('alley_size_n_m', row), 'alley_size_e_pix': self._try_spat_crop_col_key('alley_size_e_pix', row), 'alley_size_n_pix': self._try_spat_crop_col_key('alley_size_n_pix', row), 'buf_e_m': self._try_spat_crop_col_key('buf_e_m', row), 'buf_n_m': self._try_spat_crop_col_key('buf_n_m', row), 'buf_e_pix': self._try_spat_crop_col_key('buf_e_pix', row), 'buf_n_pix': self._try_spat_crop_col_key('buf_n_pix', row), 'crop_e_m': self._try_spat_crop_col_key('crop_e_m', row), 'crop_n_m': self._try_spat_crop_col_key('crop_n_m', row), 'crop_e_pix': self._try_spat_crop_col_key('crop_e_pix', row), 'crop_n_pix': self._try_spat_crop_col_key('crop_n_pix', row), 'gdf_shft_e_pix': self._try_spat_crop_col_key('gdf_shft_e_pix', row), 'gdf_shft_n_pix': self._try_spat_crop_col_key('gdf_shft_n_pix', row), 'gdf_shft_e_m': self._try_spat_crop_col_key('gdf_shft_e_m', row), 'gdf_shft_n_m': self._try_spat_crop_col_key('gdf_shft_n_m', row), 'n_plots_x': self._try_spat_crop_col_key('n_plots_x', row), 'n_plots_y': self._try_spat_crop_col_key('n_plots_y', row), 'n_plots': self._try_spat_crop_col_key('n_plots', row)} if crop_specs['fname'] is None: try: crop_specs['fname'] = (crop_specs['name_short'] + crop_specs['name_long'] + crop_specs['ext']) except TypeError: crop_specs['fname'] = None if crop_specs['fname'] is not None: base_name = os.path.basename(crop_specs['fname']) if crop_specs['name_short'] is None: crop_specs['name_short'] = base_name[ :base_name.find('-', base_name.rfind('_'))] if crop_specs['name_long'] is None: crop_specs['name_long'] = base_name[ base_name.find('-', base_name.rfind('_')):] if crop_specs['ext'] is None: crop_specs['ext'] = os.path.splitext(crop_specs['fname'])[1] for col_name in row.index: if col_name not in self.io.defaults.spat_crop_cols.keys(): crop_specs[col_name] = row[col_name] if not pd.notnull(crop_specs['name_long']): crop_specs['name_long'] = None if not pd.notnull(crop_specs['plot_id_ref']): crop_specs['plot_id_ref'] = None if not pd.notnull(crop_specs['name_short']): crop_specs['name_short'] = None self.crop_specs = crop_specs return crop_specs def _pix_to_mapunit(self, crop_specs, spyfile=None): ''' Looks over specifications of ``crop_specs``, and converts betweeen pixel units and map units if one is populated and the other is ``None`` ''' cs = crop_specs.copy() if spyfile is None: spyfile = self.io.spyfile spy_ps_e = float(spyfile.metadata['map info'][5]) spy_ps_n = float(spyfile.metadata['map info'][6]) # Crop size # if cs['crop_e_pix'] is None and cs['crop_e_m'] is not None: if pd.isnull(cs['crop_e_pix']) and pd.notnull(cs['crop_e_m']): cs['crop_e_pix'] = int(cs['crop_e_m'] / spy_ps_e) elif pd.notnull(cs['crop_e_pix']) and pd.isnull(cs['crop_e_m']): cs['crop_e_m'] = cs['crop_e_pix'] * spy_ps_e if pd.isnull(cs['crop_n_pix']) and pd.notnull(cs['crop_n_m']): cs['crop_n_pix'] = int(cs['crop_n_m'] / spy_ps_n) elif pd.notnull(cs['crop_n_pix']) and pd.isnull(cs['crop_n_m']): cs['crop_n_m'] = 
cs['crop_n_pix'] * spy_ps_n # Buffer if pd.isnull(cs['buf_e_pix']) and pd.notnull(cs['buf_e_m']): cs['buf_e_pix'] = int(cs['buf_e_m'] / spy_ps_e) elif pd.notnull(cs['buf_e_pix']) and pd.isnull(cs['buf_e_m']): cs['buf_e_m'] = cs['buf_e_pix'] * spy_ps_e if pd.isnull(cs['buf_n_pix']) and pd.notnull(cs['buf_n_m']): cs['buf_n_pix'] = int(cs['buf_n_m'] / spy_ps_e) elif pd.notnull(cs['buf_n_pix']) and pd.isnull(cs['buf_n_m']): cs['buf_n_m'] = cs['buf_n_pix'] * spy_ps_e # Shift if pd.isnull(cs['gdf_shft_e_pix']) and pd.notnull(cs['gdf_shft_e_m']): cs['gdf_shft_e_pix'] = int(cs['gdf_shft_e_m'] / spy_ps_e) elif pd.notnull(cs['gdf_shft_e_pix']) and pd.isnull(cs['gdf_shft_e_m']): cs['gdf_shft_e_m'] = cs['gdf_shft_e_pix'] * spy_ps_e if pd.isnull(cs['gdf_shft_n_pix']) and pd.notnull(cs['gdf_shft_n_m']): cs['gdf_shft_n_pix'] = int(cs['gdf_shft_n_m'] / spy_ps_e) elif pd.notnull(cs['gdf_shft_n_pix']) and pd.isnull(cs['gdf_shft_n_m']): cs['gdf_shft_n_m'] = cs['gdf_shft_n_pix'] * spy_ps_e # Alley size if (pd.isnull(cs['alley_size_e_pix']) and pd.notnull(cs['alley_size_e_m'])): cs['alley_size_e_pix'] = int(cs['alley_size_e_m'] / spy_ps_e) elif (pd.notnull(cs['alley_size_e_pix']) and pd.isnull(cs['alley_size_e_m'])): cs['alley_size_e_m'] = cs['alley_size_e_pix'] * spy_ps_e if (pd.isnull(cs['alley_size_n_pix']) and pd.notnull(cs['alley_size_n_m'])): cs['alley_size_n_pix'] = int(cs['alley_size_n_m'] / spy_ps_n) elif (pd.notnull(cs['alley_size_n_pix']) and pd.isnull(cs['alley_size_n_m'])): cs['alley_size_n_m'] = cs['alley_size_n_pix'] * spy_ps_n self.crop_specs = cs return cs def _composite_band_setup(self, base_dir_out, fname, folder_name, name_append): ''' ''' if base_dir_out is None: base_dir = os.path.dirname(fname) dir_out, name_append = self._save_file_setup( base_dir, folder_name, name_append) else: dir_out, name_append = self._save_file_setup( base_dir_out, folder_name, name_append) name_print = self._get_name_print() return dir_out, name_print, name_append def _band_math_setup(self, base_dir_out, folder_name, fname, name_append, method): ''' ''' msg = ('``method`` must be one of either "ndi", "ratio", "derivative", ' 'or "mcari2".\n') assert method in ['ndi', 'ratio', 'derivative', 'mcari2'], msg if base_dir_out is None: base_dir = os.path.dirname(fname) dir_out, name_append = self._save_file_setup( base_dir, folder_name, name_append) else: dir_out, name_append = self._save_file_setup( base_dir_out, folder_name, name_append) name_print = self._get_name_print() return dir_out, name_print, name_append # if method == 'ndi': # print('Calculating normalized difference index for: {0}' # ''.format(name_print)) # elif method == 'ratio': # print('Calculating simple ratio index for: {0}' # ''.format(name_print)) # elif method == 'mcari2': # print('Calculating MCARI2 index for: {0}' # ''.format(name_print)) def _mask_stats_setup(self, mask_thresh, mask_percentile, mask_side): ''' Parse thesholds and percentiles to dynamically set column names for masked df_stats ''' if mask_thresh is not None: if not isinstance(mask_thresh, list): mask_thresh = [mask_thresh] mask_thresh_print = '-'.join([str(x) for x in mask_thresh]) if mask_percentile is not None: if not isinstance(mask_percentile, list): mask_percentile = [mask_percentile] mask_pctl_print = '-'.join([str(x) for x in mask_percentile]) if mask_side is not None: if not isinstance(mask_side, list): mask_side = [mask_side] mask_side_print = '-'.join([str(x) for x in mask_side]) if mask_thresh is not None and mask_percentile is not None: type_mask = 
('mask-{0}-thresh-{1}-pctl-{2}'.format( mask_side_print, mask_thresh_print, mask_pctl_print)) elif mask_thresh is not None and mask_percentile is None: type_mask = ('mask-{0}-thresh-{1}'.format( mask_side_print, mask_thresh_print)) elif mask_thresh is None and mask_percentile is not None: type_mask = ('mask-{0}-pctl-{1}'.format( mask_side_print, mask_pctl_print)) columns = ['fname', 'plot_id', type_mask + '-count', type_mask + '-mean', type_mask + '-stdev', type_mask + '-median'] df_stats = pd.DataFrame(columns=columns) return df_stats, type_mask def _mask_single_stats(self, fname, array_bm, metadata_bm, mask_thresh, mask_percentile, mask_side, df_stats): ''' Creates the bandmath mask and summarizes the band math values after masking unwanted pixels. Returns the single masked bandmath array and the stats dataframe with the new image data appended as a row ''' array_mask, metadata_bm = self.io.tools.mask_array( array_bm, metadata_bm, thresh=mask_thresh, percentile=mask_percentile, side=mask_side) # array_mask, metadata_bm = hsbatch.io.tools.mask_array( # array_bandmath1, metadata_bandmath1, thresh=mask_thresh, # percentile=mask_percentile, side=mask_side) # stat_mask_count = np.count_nonzero(~np.isnan(array_mask)) # all nan values should be masked from mask_array() function stat_mask_count = array_mask.count() stat_mask_mean = array_mask.mean() stat_mask_std = array_mask.std() stat_mask_med = np.ma.median(array_mask) # stat_mask_mean = np.nanmean(array_mask) # stat_mask_std = np.nanstd(array_mask) # stat_mask_med = np.nanmedian(array_mask) data = [fname, self.io.name_plot, stat_mask_count, stat_mask_mean, stat_mask_std, stat_mask_med] df_stats_temp = pd.DataFrame(data=[data], columns=df_stats.columns) df_stats = df_stats.append(df_stats_temp, ignore_index=True) return array_mask, df_stats def _mask_two_step(self, mask_dir, mask_side, mask_thresh, mask_percentile, fname, df_stats1, df_stats2, name_label): ''' Performs a two-step masking process. The masked masked bandmath arrays and stats for each step are returned. ''' msg1 = ('Either ``mask_thresh`` or ``mask_percentile`` is a ' 'list, but ``mask_dir`` is not a list. If trying to ' 'perform a "two-step" masking process, please be sure ' 'to pass a list with length of two for both ' '``mask_dir`` and ``mask_side``, as well as either ' 'for ``mask_thresh`` or ``mask_percentile``.\n' '``mask_dir``: {0}\n``mask_side``: {1}' ''.format(mask_dir, mask_side)) msg2 = ('Either ``mask_thresh`` or ``mask_percentile`` is a ' 'list, but ``mask_side`` is not a list. 
If trying to ' 'perform a "two-step" masking process, please be sure ' 'to pass a list with length of two for both ' '``mask_dir`` and ``mask_side``, as well as either ' 'for ``mask_thresh`` or ``mask_percentile``.\n' '``mask_dir``: {0}\n``mask_side``: {1}' ''.format(mask_dir, mask_side)) assert isinstance(mask_dir, list), msg1 assert isinstance(mask_side, list), msg2 array_bandmath1, metadata_bandmath1 = self._get_array_similar( mask_dir[0]) array_bandmath2, metadata_bandmath2 = self._get_array_similar( mask_dir[1]) if isinstance(mask_thresh, list): array_mask1, df_stats1 = self._mask_single_stats( fname, array_bandmath1, metadata_bandmath1, mask_thresh[0], None, mask_side[0], df_stats1) array_mask2, df_stats2 = self._mask_single_stats( fname, array_bandmath2, metadata_bandmath2, mask_thresh[1], None, mask_side[1], df_stats2) elif isinstance(mask_percentile, list): array_mask1, df_stats1 = self._mask_single_stats( fname, array_bandmath1, metadata_bandmath1, None, mask_percentile[0], mask_side[0], df_stats1) array_mask2, df_stats2 = self._mask_single_stats( fname, array_bandmath2, metadata_bandmath2, None, mask_percentile[1], mask_side[1], df_stats2) return array_mask1, array_mask2, df_stats1, df_stats2 def _execute_mask(self, fname_list, mask_dir, base_dir_out, folder_name, name_append, write_datacube, write_spec, write_geotiff, mask_thresh, mask_percentile, mask_side): ''' Actually creates the mask to keep the main function a bit cleaner ''' if mask_side == 'outside': # thresh/pctl will be a list, so take care of this first df_stats1, type_mask1 = self._mask_stats_setup( mask_thresh, mask_percentile, mask_side) df_stats2 = None type_mask2 = None # if mask_side is not "outside" and thresh is list, then it's a 2-step elif isinstance(mask_thresh, list): if not isinstance(mask_side, list): maskside = [mask_side, mask_side] # ensure that mask_side is two parts as well df_stats1, type_mask1 = self._mask_stats_setup(mask_thresh[0], None, mask_side[0]) df_stats2, type_mask2 = self._mask_stats_setup(mask_thresh[1], None, mask_side[1]) elif isinstance(mask_percentile, list): if not isinstance(mask_side, list): maskside = [mask_side, mask_side] # ensure that mask_side is two parts as well df_stats1, type_mask1 = self._mask_stats_setup(None, mask_percentile[0], mask_side[0]) df_stats2, type_mask2 = self._mask_stats_setup(None, mask_percentile[1], mask_side[1]) else: df_stats1, type_mask1 = self._mask_stats_setup(mask_thresh, mask_percentile, mask_side) df_stats2 = None type_mask2 = None fname_list_p = tqdm(fname_list) if self.progress_bar is True else fname_list for idx, fname in enumerate(fname_list_p): if self.progress_bar is True: fname_list_p.set_description('Processing file {0}/{1}'.format(idx, len(fname_list))) self.io.read_cube(fname) metadata = self.io.spyfile.metadata.copy() metadata_geotiff = self.io.spyfile.metadata.copy() base_dir = os.path.dirname(fname) if base_dir_out is None: dir_out, name_append = self._save_file_setup( base_dir, folder_name, name_append) else: dir_out, name_append = self._save_file_setup( base_dir_out, folder_name, name_append) name_print = self._get_name_print() name_label = (name_print + name_append + '.' 
+ self.io.defaults.envi_write.interleave) if self._file_exists_check( dir_out, name_label, write_datacube=write_datacube, write_spec=write_spec, write_geotiff=write_geotiff) is True: continue # array = self.io.spyfile.load() array = self.io.spyfile.open_memmap() if mask_dir is None: mask_dir = os.path.join(self.io.base_dir, 'band_math') if df_stats2 is not None: array_mask1, array_mask2, df_stats1, df_stats2 =\ self._mask_two_step(mask_dir, mask_side, mask_thresh, mask_percentile, fname, df_stats1, df_stats2, name_label) array_mask = np.logical_or(array_mask1.mask, array_mask2.mask) else: # things are much simpler array_bandmath1, metadata_bandmath1 = self._get_array_similar( mask_dir) array_mask, df_stats1 = self._mask_single_stats( fname, array_bandmath1, metadata_bandmath1, mask_thresh, mask_percentile, mask_side, df_stats1) array_mask = array_mask.mask spec_mean, spec_std, datacube_masked = self.io.tools.mean_datacube( array, array_mask) self.spec_mean = spec_mean self.spec_std = spec_std hist_str = (" -> hs_process.batch.segment_create_mask[<" "label: 'mask_thresh?' value:{0}; " "label: 'mask_percentile?' value:{1}; " "label: 'mask_side?' value:{2}>]" "".format(mask_thresh, mask_percentile, mask_side)) metadata['history'] += hist_str metadata_geotiff['history'] += hist_str if write_datacube is True: self._write_datacube(dir_out, name_label, datacube_masked, metadata) if write_spec is True: name_label_spec = (os.path.splitext(name_label)[0] + '-mean.spec') self._write_spec(dir_out, name_label_spec, spec_mean, spec_std, metadata) self.array_mask = array_mask if write_geotiff is True: self._write_geotiff(array_mask, fname, dir_out, name_label, metadata_geotiff, self.io.tools) if len(df_stats1) > 0: fname_stats1 = os.path.join(dir_out, type_mask1 + '.csv') df_stats1.to_csv(fname_stats1, index=False) if df_stats2 is not None: if len(df_stats2) > 0: # fname_csv2 = 'mask-stats2.csv' fname_stats2 = os.path.join(dir_out, type_mask2 + '.csv') df_stats2.to_csv(fname_stats2, index=False) # # should we make an option to save a mean spectra as well? # # Yes - we aren't required to save intermediate results and do # # another batch process..? we get everything done in one shot - # # after all, why do we want to do band math if we aren't also # # calculating the average of the area (unless cropping hasn't # # been perfomed yet)? # # No - Keep it simpler and keep batch functions more specific in # # their capabilities (e.g., batch.band_math, batch.mask_array, # # batch.veg_spectra) # def _write_stats(self, dir_out, df_stats, fname_csv='stats.csv'): ''' Writes df_stats to <dir_out>, ensuring lock is in place if it exists to work as expected with parallel processing. 
''' fname_stats = os.path.join(dir_out, fname_csv) if self.lock is not None: with self.lock: if os.path.isfile(fname_stats): df_stats_in = pd.read_csv(fname_stats) df_stats = df_stats_in.append(df_stats) df_stats.to_csv(fname_stats, index=False) else: if os.path.isfile(fname_stats): df_stats_in = pd.read_csv(fname_stats) df_stats = df_stats_in.append(df_stats) df_stats.to_csv(fname_stats, index=False) def _execute_composite_band(self, fname_list, base_dir_out, folder_name, name_append, write_geotiff, wl1, b1, list_range, plot_out): ''' Actually executes the composit band to keep the main function a bit cleaner ''' type_bm = '-comp-{0}'.format(int(np.mean(wl1))) columns = ['fname', 'plot_id', 'count', 'mean', 'std_dev', 'median', 'pctl_10th', 'pctl_25th', 'pctl_50th', 'pctl_75th', 'pctl_90th', 'pctl_95th'] df_stats = pd.DataFrame(columns=columns) fname_list_p = tqdm(fname_list) if self.progress_bar is True else fname_list for idx, fname in enumerate(fname_list_p): if self.progress_bar is True: fname_list_p.set_description('Processing file {0}/{1}'.format(idx, len(fname_list))) self.io.read_cube(fname) dir_out, name_print, name_append = self._composite_band_setup( base_dir_out, fname, folder_name, name_append) self.my_segment = segment(self.io.spyfile) name_label = (name_print + name_append + type_bm + '.{0}' ''.format(self.io.defaults.envi_write.interleave)) if self._file_exists_check( dir_out, name_label, write_datacube=True, write_geotiff=write_geotiff, write_plot=plot_out) is True: continue array_b1, metadata = self.my_segment.composite_band( wl1=wl1, b1=b1, list_range=list_range, print_out=False) stat_count = np.count_nonzero(~np.isnan(array_b1)) stat_mean = np.nanmean(array_b1) stat_std = np.nanstd(array_b1) stat_med = np.nanmedian(array_b1) stat_pctls = np.nanpercentile(array_b1, [10, 25, 50, 75, 90, 95]) data = [fname, self.io.name_plot, stat_count, stat_mean, stat_std, stat_med, stat_pctls[0], stat_pctls[1], stat_pctls[2], stat_pctls[3], stat_pctls[4], stat_pctls[5]] df_stats_temp = pd.DataFrame(data=[data], columns=columns) df_stats = df_stats.append(df_stats_temp, ignore_index=True) if plot_out is True: fname_fig = os.path.join(dir_out, os.path.splitext(name_label)[0] + '.png') self.io.tools.plot_histogram( array_b1, fname_fig=fname_fig, title=name_print, xlabel=array_b1.upper(), percentile=90, bins=50, fontsize=14, color='#444444') metadata['label'] = name_label self._write_datacube(dir_out, name_label, array_b1, metadata) if write_geotiff is True: self._write_geotiff(array_b1, fname, dir_out, name_label, metadata, self.my_segment.tools) if len(df_stats) > 0: self._write_stats(dir_out, df_stats, fname_csv=name_append[1:] + '-stats.csv') def _execute_band_math(self, fname_list, base_dir_out, folder_name, name_append, write_geotiff, method, wl1, wl2, wl3, b1, b2, b3, list_range, plot_out): ''' Actually executes the band math to keep the main function a bit cleaner ''' if method == 'ndi' or method == 'ratio': type_bm = ('{0}-{1}-{2}'.format(method, int(np.mean(wl1)), int(np.mean(wl2)))) elif method == 'derivative': type_bm = ('{0}-{1}-{2}-{3}'.format(method, int(np.mean(wl1)), int(np.mean(wl2)), int(np.mean(wl2)))) elif method == 'mcari2': type_bm = ('{0}-{1}-{2}-{3}'.format(method, int(np.mean(wl1)), int(np.mean(wl2)), int(np.mean(wl2)))) columns = ['fname', 'plot_id', 'count', 'mean', 'std_dev', 'median', 'pctl_10th', 'pctl_25th', 'pctl_50th', 'pctl_75th', 'pctl_90th', 'pctl_95th'] df_stats =
pd.DataFrame(columns=columns)
pandas.DataFrame
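_mask_single_stats above summarizes a masked band-math array (count, mean, standard deviation, median) and appends the result as a row of a stats DataFrame. Below is a compact sketch of that summary step on an invented array and threshold; note that the original relies on DataFrame.append, which newer pandas versions have removed, so the sketch builds the row with the DataFrame constructor instead.

import numpy as np
import pandas as pd

rng = np.random.default_rng(3)
band_math = rng.normal(loc=0.5, scale=0.2, size=(4, 5))   # invented index values

# Mask (exclude) pixels below an assumed threshold of 0.4, keeping the rest.
array_mask = np.ma.masked_where(band_math < 0.4, band_math)

row = {
    "fname": "plot_101.bip",                  # hypothetical file name
    "count": int(array_mask.count()),
    "mean": float(array_mask.mean()),
    "std_dev": float(array_mask.std()),
    "median": float(np.ma.median(array_mask)),
}
df_stats = pd.DataFrame([row])
print(df_stats)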
import sys import os import shutil import subprocess import copy import numpy as np import pandas as pd import random import time import tempfile import re from pathlib import Path from cascade_at.core.log import logging, get_loggers, LEVELS """ If there is mtspecific, hold out mtexcess on the ode fit. Set the mulcov bounds Check convergence Check prediction """ from cascade_at.dismod.constants import _dismod_cmd_ _fit_ihme_py_ = 'fit_ihme.py' _max_iters_ = 500 sys.path.append('/Users/gma/Projects/IHME/GIT/cascade-at/src') from cascade_at.dismod.api.dismod_io import DismodIO __compare_to_fit_ihme_dot_py__ = False if __compare_to_fit_ihme_dot_py__: class LOG: def info(msg): print (msg) def error(msg): print (msg) else: LOG = get_loggers(__name__) logging.basicConfig(level=LEVELS['info']) def system (command) : # flush python's pending standard output in case this command generates more standard output sys.stdout.flush() LOG.info(command) if isinstance(command, str): kwds = dict(shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: kwds = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE) run = subprocess.run(command, **kwds) if run.returncode != 0 : raise Exception(f'"{command}" failed.') if run.stderr: print(run.stderr.decode()) if run.stdout: print(run.stdout.decode()) def compare_dataframes(df0, df1): # FIXME -- poor design, should probably return the error between the dataframes instead # of raising an exeption or returning a string tol = {'atol': 1e-8, 'rtol': 1e-10} assert set(df0.columns) == set(df1.columns), "Can't compare dataframes with different columns." tmp = (df0.fillna(-1) != df1.fillna(-1)) mask0 = tmp.any(1).values mask1 = tmp.any(0).values msg = '' if mask0.any(): diff0 = df0.loc[mask0, mask1] diff1 = df1.loc[mask0, mask1] numeric_cols = [k for k in diff0.columns if not (isinstance(diff0[:1][k].squeeze(), str) or isinstance(diff1[:1][k].squeeze(), str))] error = np.max(np.abs(diff0[numeric_cols] - diff1[numeric_cols])) if not error.empty: msg = f' within tolerance, max(abs(error)) = {error}' if not np.allclose(diff0[numeric_cols], diff1[numeric_cols], **tol): print (diff0) print (diff1) raise Exception('ERROR: dataframes do not match') return f'Dataframes are equal{msg}.' class FitNoODE(DismodIO): def __init__(db, *args, ode_hold_out_list = (), **kwds): if 'dismod' in kwds: db.dismod = kwds.pop('dismod') else: db.dismod = _dismod_cmd_ super().__init__(*args, **kwds) db.predict_integrands = [ 'susceptible', 'withC' ] db.enough_mtspecific = 100 db.input_data = db.data db.ode_hold_out_list = ode_hold_out_list db.set_integrand_lists() msg = '\nInitial integrands = ' + str( db.integrands ) LOG.info(msg) # ============================================================================ # Utilities that use database tables but do not modify them # ============================================================================ def relative_covariate(db, covariate_id) : column_name = 'x_{}'.format(covariate_id) # sex is an absolute covariate and has 3 values, -0.5, 0.0, +0.5 # one is an absolute covariate and has perhaps 2 values, 0.0, 1.0 # it is reasonable to assume that a covariate with more than 3 covariate values is relative return len(set(db.data[column_name])) > 3 def set_integrand_lists (db) : # If ode is true (false) get list of integrands that require # (do not require) the ode to model. 
integrand_model_uses_ode = {'prevalence', 'Tincidence', 'mtspecific', 'mtall', 'mtstandard'} data = db.data.merge(db.integrand, how='left') integrands = [n for n in data.integrand_name.unique() if np.any(data.loc[data.integrand_name == n, 'hold_out'].values == 0).any()] db.yes_ode_integrands = sorted(set(integrands).intersection(integrand_model_uses_ode)) db.no_ode_integrands = sorted(set(integrands) - integrand_model_uses_ode) db.integrands = db.yes_ode_integrands + db.no_ode_integrands def get_rate_case(db): iota_zero = not np.isfinite(db.rate.loc[db.rate.rate_name == 'iota', 'parent_smooth_id']).squeeze() rho_zero = not np.isfinite(db.rate.loc[db.rate.rate_name == 'rho', 'parent_smooth_id']).squeeze() chi_zero = not np.isfinite(db.rate.loc[db.rate.rate_name == 'chi', 'parent_smooth_id']).squeeze() rate_case = ('iota_zero' if iota_zero else 'iota_pos') + '_' + ('rho_zero' if rho_zero else 'rho_pos') return rate_case def new_smoothing(db, integrand_name, age_grid, time_grid, value_prior, dage_prior, dtime_prior): # Add a new smoothing that has one prior that is used for all age and # time grid points. The smooth, smooth_grid, age, and time tables are # modified, but the new versions are not written by this routine. # The arguments value_prior, dage_prior, dtime_prior, # contain the priors used in the smothing. # smooth = db.smooth smooth_grid = db.smooth_grid prior = db.prior n_age = len(age_grid) n_time = len(time_grid) age_id_list = db.age.loc[db.age.age.isin(age_grid), 'age_id'].tolist() time_id_list = db.time.loc[db.time.time.isin(time_grid), 'time_id'].tolist() new_smooth_id = len(smooth) # # add value_prior to prior_table new_value_prior_id = len(prior) prior = prior.append(copy.copy(value_prior), ignore_index=True) # # add dage_prior to prior table new_dage_prior_id = len(prior) prior = prior.append(copy.copy(dage_prior), ignore_index=True) # # add dtime_prior to prior table new_dtime_prior_id = len(prior) prior = prior.append(copy.copy(dtime_prior), ignore_index=True) # # add row to smooth_table smooth_name = f'{integrand_name}_noise_smoothing_{new_smooth_id}' row = {'smooth_name' : smooth_name , 'n_age' : n_age , 'n_time' : n_time , 'mulstd_value_prior_id' : None , 'mulstd_dage_prior_id' : None , 'mulstd_dtime_prior_id' : None , } smooth = smooth.append(row, ignore_index=True) # # add rows to smooth_grid_table for i in range(n_age) : for j in range(n_time) : row = {'smooth_id' : new_smooth_id , 'age_id' : age_id_list[i] , 'time_id' : time_id_list[j] , 'value_prior_id' : new_value_prior_id , 'dage_prior_id' : new_dage_prior_id , 'dtime_prior_id' : new_dtime_prior_id , 'const_value' : None , } smooth_grid = smooth_grid.append(row, ignore_index=True) # # return the new smoothing smooth = smooth.reset_index(drop=True); smooth['smooth_id'] = smooth.index smooth_grid = smooth_grid.reset_index(drop=True); smooth_grid['smooth_grid_id'] = smooth_grid.index prior = prior.reset_index(drop=True); prior['prior_id'] = prior.index return new_smooth_id, smooth, smooth_grid, prior # ============================================================================= # Routines that Only Change Data Table # ============================================================================= def random_subsample_data(db, integrand_name, max_sample) : # for a specified integrand, sample at most max_sample entries. # This does random sampling using hold_out that can be seeded by calling random.seed. 
        #
        # This code may seem a little obtuse, but it matches Brad's sampling method so they can be compared
        #
        data = db.data.merge(db.integrand, how='left')
        integrand = data[data.integrand_name == integrand_name]
        n_sample_in = len(integrand)
        n_sample_out = min(max_sample, len(integrand))
        LOG.info ('random_subsample_data')
        LOG.info (f'number of {integrand_name} samples: in = {n_sample_in} out = {n_sample_out}')
        # Note: A preferred, direct random sampling (e.g., integrand.sample(n_sample_out)) didn't match Brad's sampling
        # Sample the integrand dataframe row index
        if n_sample_out < n_sample_in :
            index = list(range(len(integrand)))
            holdout_index = sorted(set(index) - set(random.sample(index, n_sample_out)))
            # Get the data table indices corresponding to the holdouts
            data_indices = integrand.iloc[holdout_index].index.tolist()
            data.loc[data_indices, 'hold_out'] = 1
        db.data = data[db.data.columns]

    def hold_out_data (db, integrand_names=(), node_names=(), hold_out=False) :
        if isinstance(integrand_names, str): integrand_names = [integrand_names]
        if isinstance(node_names, str): node_names = [node_names]
        data = db.data.merge(db.integrand).merge(db.node)
        mask = [False]*len(data)
        if integrand_names: mask |= data.integrand_name.isin(integrand_names)
        if node_names: mask |= data.node_name.isin(node_names)
        LOG.info (f"Setting hold_out = {hold_out} for integrand {integrand_names}, node {node_names}")
        data.loc[mask, 'hold_out'] = hold_out
        db.data = data[db.data.columns]

    def set_data_likelihood (db, integrand_name, density_name, factor_eta=None, nu=None):
        # For a specified integrand, set its density, eta, and nu.
        # The default value for eta and nu is None.
        # If factor_eta is not None, eta is set to the factor times the median
        # value for the integrand.
        assert (factor_eta is None) or 0.0 <= factor_eta
        #
        msg  = '\nset_data_likelihood\n'
        msg += 'integrand = {}'.format(integrand_name)
        msg += ', density = {}'.format(density_name)
        if factor_eta is not None :
            msg += ', eta = m*{}'.format(factor_eta)
        if nu is not None :
            msg += ', nu = {}'.format(nu)
        data = db.data.merge(db.integrand, how='left')
        #
        density_id = int(db.density.loc[db.density.density_name == density_name, 'density_id'])
        mask = data.integrand_name == integrand_name
        if factor_eta is None :
            eta = None
        else :
            median = data[data.hold_out == 0].meas_value[mask].median()
            eta = factor_eta * median
            msg += f'\n = {median:6.4f} where m is the median of the {integrand_name} data'
        data.loc[mask, 'density_id'] = density_id
        data.loc[mask, 'eta'] = eta
        data.loc[mask, 'nu'] = nu
        db.data = data[db.data.columns]
        LOG.info( msg )

    def set_student_likelihoods(db, factor_eta = 1e-2, nu = 5):
        # Apply the log_students likelihood, with the given eta factor and nu,
        # to every integrand present in the data except mtall.
        integrand_list = db.integrand.loc[db.data.integrand_id.unique(), 'integrand_name'].tolist()
        density_name = 'log_students'
        for integrand_name in integrand_list :
            if integrand_name not in ['mtall']:
                db.set_data_likelihood(integrand_name, density_name, factor_eta, nu)

    def compress_age_time_intervals(db, age_size = 10.0, time_size = 10.0):
        data = db.data
        mask = (data.age_upper - data.age_lower) <= age_size
        mean = data[['age_lower', 'age_upper']].mean(axis=1)
        data.loc[mask, 'age_lower'] = data.loc[mask, 'age_upper'] = mean[mask]
        mask = (data.time_upper - data.time_lower) <= time_size
        mean = data[['time_lower', 'time_upper']].mean(axis=1)
        data.loc[mask, 'time_lower'] = data.loc[mask, 'time_upper'] = mean[mask]
        LOG.info ('compress_age_time_intervals -- all integrands')
        LOG.info ('Use midpoint for intervals less than or equal to the specified size')
        db.data = data[db.data.columns]

    # ============================================================================
    # Routines that Change Other Tables
    # ============================================================================

    def get_option (db, name) :
        # Return the value of the option specified by name (a string), or None
        # if the option is not present in the option table.
        option = db.option
        value = option.loc[option.option_name == name, 'option_value']
        if value.empty:
            return None
        return value.squeeze()

    def set_option (db, name, value) :
        # Set the option specified by name to value, where name and value are
        # strings. The system() call logs the dismod command used for this operation.
        system(f'{db.dismod} {db.path} set option {name} {value}')

    def new_zero_smooth_id (db, smooth_id) :
        # FIXME: Remove this when bounds on mulcov work
        # add a new smoothing that has the same grid as the smooth_id smoothing
        # and that constrains the value to zero. The smooth and smooth_grid tables are
        # modified by this routine but they are not written out.
        if smooth_id is None :
            return None
        #
        smooth = db.smooth
        smooth_grid = db.smooth_grid
        new_smooth_id = len(smooth)
        new_row = copy.copy( smooth[smooth.smooth_id == smooth_id] )
        new_row['smooth_name'] = f'zero_smoothing #{new_smooth_id}'
        smooth = smooth.append(new_row).reset_index(drop=True)
        smooth['smooth_id'] = smooth.index
        #
        mask = smooth_grid.smooth_id == smooth_id
        for i, old_row in smooth_grid.iterrows() :
            if old_row['smooth_id'] == smooth_id :
                new_row = copy.copy( old_row )
                new_row['smooth_id'] = new_smooth_id
                new_row['value_prior_id'] = None
                new_row['dage_prior_id'] = None
                new_row['dtime_prior_id'] = None
                new_row['const_value'] = 0.0
                smooth_grid = smooth_grid.append( new_row ).reset_index(drop=True)
        smooth_grid['smooth_grid_id'] = smooth_grid.index
        db.smooth = smooth
        db.smooth_grid = smooth_grid

    def new_bounded_smooth_id (db, smooth_id, lower, upper, density_name = 'uniform', smooth_name = '') :
        # add a new smoothing that has the same grid as the smooth_id smoothing
        # and that constrains the value to be within the specified lower and upper
        # bounds. The prior, smooth and smooth_grid tables are modified but
        # they are not written out. The lower and upper bounds can be None.
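        # Worked examples of the mean_from_limits helper defined below (for orientation only):
        #   mean_from_limits(None, None) -> 0.0
        #   mean_from_limits(-1.0, 3.0)  -> 1.0   (midpoint of the two bounds)
        #   mean_from_limits(None, -2.0) -> -2.0  (upper < 0, so the mean must respect it)
        #   mean_from_limits(None, 5.0)  -> 0.0
        #   mean_from_limits(2.0, None)  -> 2.0   (lower > 0, the symmetric case)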
def mean_from_limits(lower, upper): if lower is None and upper is None : mean = 0.0 elif lower is not None and upper is not None : mean = (lower + upper) / 2.0 elif lower is None: mean = 0.0 if upper >= 0.0 else upper elif upper is None: mean = 0.0 if lower <= 0.0 else lower else: raise Exception ('Tests failed') return mean smooth_table = db.smooth smooth_grid_table = db.smooth_grid prior_table = db.prior if smooth_id is None : return None # mean = mean_from_limits(lower, upper) # # smooth_table new_smooth_id = len(smooth_table) new_row = copy.copy( smooth_table.loc[smooth_id] ) new_row['smooth_id'] = new_smooth_id new_row['smooth_name'] = f'{smooth_name}bound_smoothing_' + str( new_smooth_id ) smooth_table = smooth_table.append( new_row ) # new_prior_id = len(prior_table) density_id = int(db.density.loc[db.density.density_name == density_name, 'density_id']) std = np.nan if density_name == 'uniform' else np.sqrt(1/12)*(upper-lower) value_prior = { 'prior_name' : 'smoothing_{}_centered_prior'.format(new_smooth_id), 'prior_id' : new_prior_id, 'density_id' : density_id, 'lower' : lower, 'upper' : upper, 'mean' : mean, 'std' : std, 'eta' : np.nan, 'nu' : np.nan, } prior_table = prior_table.append( [value_prior] ) # for i, old_row in smooth_grid_table.iterrows() : if old_row['smooth_id'] == smooth_id : new_row = copy.copy( old_row ) new_row['smooth_id'] = new_smooth_id new_row['smooth_grid_id'] = len(smooth_grid_table) new_row['value_prior_id'] = new_prior_id new_row['dage_prior_id'] = None new_row['dtime_prior_id'] = None new_row['const_value'] = None smooth_grid_table = smooth_grid_table.append( new_row ) smooth_table = smooth_table.reset_index(drop=True) smooth_grid_table = smooth_grid_table.reset_index(drop=True) db.smooth = smooth_table db.smooth_grid = smooth_grid_table db.prior = prior_table return new_smooth_id def set_covariate_reference (db, covariate_id, reference_name = 'median') : # set the reference value for a specified covariate where reference_name # is 'mean' or 'median' # # covariate_value data = db.data[db.data.hold_out == 0] covariate = db.covariate covariate_name = covariate.loc[covariate_id, 'covariate_name'] covariate_value = data[covariate_name].tolist() old_reference = float(covariate.loc[covariate_id, 'reference']) new_reference = getattr(np, reference_name)(covariate_value) # covariate.loc[covariate_id, 'reference'] = new_reference # msg = '\nset_covariate_reference\n' msg += f'for covariate = {covariate_name}' msg += f', covariate_id = {covariate_id}' msg += f', reference_name = {reference_name}' msg += f'\nold_reference = {old_reference:.5g}' msg += f', new_reference = {new_reference:.5g}' LOG.info( msg ) # db.covariate = covariate def set_mulcov_bound(db, covariate_id, max_covariate_effect = 2) : # Set bounds for all of the multipliers for a specified covariate so # corresponding absolute effect is bounded by # disease_specific_max_covariate_effect. # Noise covariate multipliers are not included. # The bounds for an integerand are set to zero if the covariate # is identically equalt the reference for that integrand. 
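        # Worked example (illustration only): if the covariate-minus-reference values for
        # an integrand span [-0.4, 0.25] and max_covariate_effect = 2, the loop below gives
        #   upper = min( 2 / 0.25, -2 / -0.4) = min( 8,  5) =  5
        #   lower = max(-2 / 0.25,  2 / -0.4) = max(-8, -5) = -5
        # so that |multiplier * (covariate - reference)| <= 2 over the observed range.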
assert max_covariate_effect >= 0.0, 'disease specific max_covariate_effect is negative' data = db.data[db.data.hold_out == 0] covariate = db.covariate mulcov = db.mulcov # # difference_dict = covariate minus reference covariate_name = f'x_{covariate_id}' reference = float(covariate.loc[covariate_id, 'reference']) difference_dict = {integrand_id: (data.loc[(data.integrand_id == integrand_id) & ~data[covariate_name].isna(), covariate_name] - reference).values for integrand_id in data.integrand_id.unique()} if data[covariate_name].notna().any(): difference_dict[covariate_name] = data[covariate_name] - reference # # lower_dict and upper_dict lower_dict = dict() upper_dict = dict() for integrand_id in difference_dict : # # maximum and minimum difference min_difference = min(difference_dict[integrand_id]) max_difference = max(difference_dict[integrand_id]) # # initialize lower = - float("inf") upper = + float("inf") if max_difference > 0 : upper = min(upper, max_covariate_effect / max_difference) lower = max(lower, - max_covariate_effect / max_difference) if min_difference < 0 : upper = min(upper, - max_covariate_effect / min_difference) lower = max(lower, max_covariate_effect / min_difference) if upper == float("inf") : lower = 0.0 upper = 0.0 lower_dict[integrand_id] = lower upper_dict[integrand_id] = upper for i,row in mulcov.iterrows() : if row['covariate_id'] == covariate_id : lower = - float('inf') upper = + float('inf') integrand_id = row['integrand_id'] if integrand_id in difference_dict : lower = lower_dict[integrand_id] upper = upper_dict[integrand_id] assert row['mulcov_type'] != 'rate_value' elif integrand_id is not None and np.isfinite(integrand_id): lower = 0.0 upper = 0.0 assert row['mulcov_type'] != 'rate_value' else : assert row['mulcov_type'] == 'rate_value' covariate_name = 'x_{}'.format(covariate_id) if covariate_name in difference_dict : lower = lower_dict[covariate_name] upper = upper_dict[covariate_name] else : lower = 0.0 upper = 0.0 if row['mulcov_type'] != 'meas_noise' : group_smooth_id = row['group_smooth_id'] group_smooth_id = db.new_bounded_smooth_id( group_smooth_id, lower, upper, 'uniform' ) row['group_smooth_id'] = group_smooth_id # subgroup_smooth_id = row['subgroup_smooth_id'] subgroup_smooth_id = db.new_bounded_smooth_id( subgroup_smooth_id, lower, upper, 'uniform' ) row['subgroup_smooth_id'] = subgroup_smooth_id mulcov.loc[i] = row # integrand_name = db.integrand.loc[db.integrand.integrand_id == integrand_id, 'integrand_name'].squeeze() msg = '\nset_mulcov_bound\n' msg += 'integrand = {}, covariate = x_{}, max_covariate_effect = {}, ' msg += 'lower = {:.5g}, upper = {:.5g}' msg = msg.format(integrand_name, covariate_id, max_covariate_effect, lower, upper) LOG.info( msg ) db.mulcov = mulcov def set_mulcov_value(db, covariate_name, rate_or_integrand_name, mulcov_value) : # Set the value for a specific covariate multiplier. # The corresponding multiplier must be in the covariate table. # Noise covariate multipliers are not included. # mulcov = (db.mulcov.merge(db.covariate, how='left').merge(db.rate, how='left').merge(db.integrand, how='left')) mask = ((mulcov.covariate_name == covariate_name) & (mulcov.rate_name == rate_or_integrand_name) & mulcov.mulcov_type.isin(['rate_value', 'meas_value'])) assert mask.any(), f'Failed to find {covariate_name} for {rate_or_integrand_name} in mulcov table.' 
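        # Setting lower == upper == mulcov_value in the bounded smoothing created below
        # effectively freezes the covariate multiplier at that value.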
matches = mulcov[mask] for i, row in matches.iterrows(): lower = upper = mulcov_value group_smooth_id = db.new_bounded_smooth_id(row.group_smooth_id, lower, upper) mulcov.loc[mulcov.mulcov_id == row.mulcov_id, 'group_smooth_id'] = group_smooth_id # subgroup_smooth_id = db.new_bounded_smooth_id(row.subgroup_smooth_id, lower, upper) mulcov.loc[mulcov.mulcov_id == row.mulcov_id, 'subgroup_smooth_id'] = subgroup_smooth_id LOG.info (f'\nset_mulcov_value') LOG.info (f'covariate = {covariate_name}, {row.mulcov_type} = {rate_or_integrand_name}, value = {mulcov_value:.5g}') # db.mulcov = mulcov[db.mulcov.columns] def add_meas_noise_mulcov(db, integrand_name, group_id, factor) : # Add a meas_noise covariate multiplier for a specified integrand. # integrand_data: is the current result of get_integrand_data. # group_id: specifies the group for the covariate multiplier. # # factor: is a dictionary with following keys: mean, lower, upper. # For each key the factor multipliers the absolute value of the # median of the data for this integrand to get the corresponding value # in the uniform prior for the square root of the covariate multiplier. # In other words, the factor is times a value is in standard deviation # units, while the prior values are in variance units. # # Note that meas_noise multipliers can't have # ramdom effect (so the subgroup id is null in the mulcov table). integrand = db.integrand data = db.data[db.data.hold_out == 0].merge(integrand, how='left') subgroup = db.subgroup covariate = db.covariate mulcov = db.mulcov smooth = db.smooth smooth_grid = db.smooth_grid prior = db.prior density = db.density tst = (0.0 <= factor['lower'] <= factor['mean'] <= factor['upper']) assert tst, 'Factor is not monotonically increasing.' mask = (subgroup.group_id == group_id).values group_name = subgroup.loc[mask, 'group_name'].squeeze() assert group_name or not group_name.empty, 'Group name error for group_id = {group_id}' # This covariate_id is the identically_one_covariate covariate_id = int(covariate.loc[covariate.c_covariate_name == 's_one', 'covariate_id']) integrand_id = int(integrand.loc[integrand.integrand_name == integrand_name, 'integrand_id']) mulcov_values = mulcov[['mulcov_type', 'integrand_id', 'covariate_id']].values.tolist() if ['meas_noise', integrand_id, covariate_id] in mulcov_values: return # median = abs( data.loc[data.integrand_name == integrand_name, 'meas_value'].median() ) lower = ( median * factor['lower'] )**2 mean = ( median * factor['mean'] )**2 upper = ( median * factor['upper'] )**2 # msg = '\nadd_meas_noise_mulcov\n' msg += f'integrand = {integrand_name}, group = {group_name}, uniform value prior\n' msg += f'lower = (|median|*{factor["lower"]})^2 = {lower:.5g}\n' msg += f'mean = (|median|*{factor["mean"]})^2 = {mean:.5g}\n' msg += f'upper = (|median|*{factor["upper"]})^2 = {upper:.5g}\n' msg += 'where median is the median of the {} data'.format(integrand_name) LOG.info( msg ) # mulcov_id = len(mulcov) # # prior used in one point smoothing density_id = int(density.loc[density.density_name == 'uniform', 'density_id']) value_prior = { 'prior_name' : integrand_name + '_meas_noise_value_prior' , 'density_id' : density_id , 'lower' : lower , 'upper' : upper , 'mean' : mean , 'std' : np.nan , 'eta' : None , 'nu' : None , } dage_prior = copy.copy( value_prior ) dtime_prior = copy.copy( value_prior ) dage_prior['prior_name'] = integrand_name + '_meas_noise_dage_prior' dtime_prior['prior_name'] = integrand_name + '_meas_noise_dtime_prior' # # new one point smoothing age_grid = 
db.age.loc[:0, 'age'].tolist() time_grid = db.time.loc[:0, 'time'].tolist() smooth_id, smooth, smooth_grid, prior = db.new_smoothing( integrand_name, age_grid, time_grid, value_prior, dage_prior, dtime_prior ) # # new row in mulcov_table row = dict(mulcov_id = None, mulcov_type = 'meas_noise', covariate_id = covariate_id, integrand_id = integrand_id, group_id = group_id, group_smooth_id = smooth_id) mulcov = mulcov.append(row, ignore_index=True) # # write out the tables that changed mulcov = mulcov.reset_index(drop=True); mulcov['mulcov_id'] = mulcov.index smooth = smooth.reset_index(drop=True); smooth['smooth_id'] = smooth.index smooth_grid = smooth_grid.reset_index(drop=True); smooth_grid['smooth_grid_id'] = smooth_grid.index prior = prior.reset_index(drop=True); prior['prior_id'] = prior.index db.mulcov = mulcov db.smooth = smooth db.smooth_grid = smooth_grid db.prior = prior def check_last_command(db, command): log = db.log last_begin = [l for i,l in log.iterrows() if l.message_type == 'command' and l.message.startswith('begin ')] rtn = True if not last_begin: LOG.error(f"ERROR: Failed to find a 'begin' command.") rtn = False else: last_begin = last_begin[-1] if rtn: start_cmd = [l for i,l in log[last_begin.log_id:].iterrows() if l.message_type == 'command' and l.message.startswith(f'begin {command}')] if not start_cmd: LOG.error(f"ERROR: Expected 'begin {command}' but found '{last_begin.message}'.") rtn = False else: start_cmd = start_cmd[-1] if rtn: end_cmd = [l for i,l in log[start_cmd.log_id:].iterrows() if l.message_type == 'command' and l.message.startswith(f'end {command}')] if not end_cmd: LOG.error(f"ERROR: Did not find end for this '{start_cmd.message}' command") rtn = False for i,l in log[start_cmd.log_id:].iterrows(): if l.message_type in ['error', 'warning']: LOG.info (f"DISMOD {l.message_type}: {l.message.rstrip()}") rtn = False if rtn: LOG.info (f"{db.dismod} {command} OK") else: LOG.error (f"ERROR: {db.dismod} {command} had errors, warnings, or failed to complete.") return rtn def set_avgint(db, covariate_integrand_list) : # ----------------------------------------------------------------------- # create avgint table # For each covariate_integrand # For data row corresponding to this covariate_integrand # For each predict_intgrand # write a row with specified covariates for predict_integrand #----------------------------------------------------------------------- # covariate_id_list = db.integrand.loc[db.integrand.integrand_name.isin(covariate_integrand_list), 'integrand_id'].tolist() predict_id_list = db.integrand.loc[db.integrand.integrand_name.isin(db.predict_integrands), 'integrand_id'].tolist() # First access of an empty db.avgint does not have covariate names. # Once it is initialized, it does. This is DismodIO weirdness. db.avgint = pd.DataFrame() avgint_cols = db.avgint.columns.tolist() cov_cols = sorted(set(db.covariate.covariate_name) - set(avgint_cols)) avgint_cols += cov_cols try: data = db.data[db.data.data_id.isin(db.data_subset.data_id)] except: data = db.data data = data[data.integrand_id.isin(covariate_id_list)] data['avgint_id'] = data['data_id'] covariate_data = data[data.integrand_id.isin(covariate_id_list)] avgint =
pd.DataFrame()
pandas.DataFrame
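# A minimal, self-contained sketch of the avgint-building pattern used in set_avgint
# above: each kept data row for a covariate integrand is replicated once per predict
# integrand, keeping its ages, times, and covariates. The names toy_data and
# predict_ids are illustrative only and are not part of the cascade_at/DismodIO API.
import pandas as pd

toy_data = pd.DataFrame({
    'data_id':      [0, 1],
    'integrand_id': [2, 2],        # rows belonging to one covariate integrand
    'age_lower':    [0.0, 50.0],
    'age_upper':    [10.0, 60.0],
    'x_0':          [0.1, -0.2],
})
predict_ids = [7, 8]               # e.g. ids for susceptible and withC

blocks = []
for predict_id in predict_ids:
    block = toy_data.copy()
    block['integrand_id'] = predict_id   # swap the integrand, keep everything else
    blocks.append(block)
avgint = pd.concat(blocks, ignore_index=True)
avgint['avgint_id'] = avgint.index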
"""tests.core.archive.test_archive.py Copyright Keithley Instruments, LLC. Licensed under MIT (https://github.com/tektronix/syphon/blob/master/LICENSE) """ import os from typing import List, Optional, Tuple import pytest from _pytest.capture import CaptureFixture from _pytest.fixtures import FixtureRequest from _pytest.monkeypatch import MonkeyPatch from pandas import DataFrame, concat, read_csv from pandas.testing import assert_frame_equal from py._path.local import LocalPath from sortedcontainers import SortedDict, SortedList import syphon import syphon.hash import syphon.schema from syphon.core.archive.filemap import MappingBehavior from syphon.core.check import DEFAULT_FILE as DEFAULT_HASH_FILE from ... import get_data_path, rand_string from ...assert_utils import assert_captured_outerr from ...types import PathType @pytest.fixture( params=[ ("iris.csv", SortedDict({"0": "Name"})), ("iris_plus.csv", SortedDict({"0": "Species", "1": "PetalColor"})), ( "auto-mpg.csv", SortedDict({"0": "model year", "1": "cylinders", "2": "origin"}), ), ] ) def archive_params(request: FixtureRequest) -> Tuple[str, SortedDict]: return request.param @pytest.fixture( params=[ ( "iris-part-1-of-6", "iris-part-1-of-6-combined.csv", SortedDict({"0": "Species", "1": "PetalColor"}), ), ( "iris-part-2-of-6", "iris-part-2-of-6-combined.csv", SortedDict({"0": "Species", "1": "PetalColor"}), ), ( "iris-part-3-of-6", "iris-part-3-of-6-combined.csv", SortedDict({"0": "Species", "1": "PetalColor"}), ), ( "iris-part-4-of-6", "iris-part-4-of-6-combined.csv", SortedDict({"0": "Species", "1": "PetalColor"}), ), ( "iris-part-5-of-6", "iris-part-5-of-6-combined.csv", SortedDict({"0": "Species", "1": "PetalColor"}), ), ( "iris-part-6-of-6", "iris-part-6-of-6-combined.csv", SortedDict({"0": "Species", "1": "PetalColor"}), ), ] ) def archive_meta_params(request: FixtureRequest) -> Tuple[str, str, SortedDict]: return request.param @pytest.fixture(params=[PathType.ABSOLUTE, PathType.RELATIVE]) def styled_cache_file(request: FixtureRequest, cache_file: LocalPath) -> str: """Breaks if any test in this file changes the current working directory!""" if request.param == PathType.ABSOLUTE: return str(cache_file) elif request.param == PathType.RELATIVE: return os.path.relpath(cache_file, os.getcwd()) else: raise TypeError(f"Unsupported PathType '{request.param}'") @pytest.fixture(params=[PathType.ABSOLUTE, PathType.RELATIVE]) def styled_hash_file( request: FixtureRequest, hash_file: Optional[LocalPath] ) -> Optional[str]: """Breaks if any test in this file changes the current working directory!""" if hash_file is None: return None if request.param == PathType.ABSOLUTE: return str(hash_file) elif request.param == PathType.RELATIVE: return os.path.relpath(hash_file, os.getcwd()) else: raise TypeError(f"Unsupported PathType '{request.param}'") def _get_expected_paths( path: str, schema: SortedDict, subset: DataFrame, filename: str, data: SortedList = SortedList(), ) -> SortedList: path_list = data.copy() this_schema = schema.copy() try: _, header = this_schema.popitem(index=0) except KeyError: path_list.add(os.path.join(path, filename)) return path_list if header not in subset.columns: return path_list for value in subset.get(header).drop_duplicates().values: new_subset = subset.loc[subset.get(header) == value] value = value.lower().replace(" ", "_") if value[-1] == ".": value = value[:-1] path_list = _get_expected_paths( os.path.join(path, value), this_schema, new_subset, filename, data=path_list ) return path_list class 
TestArchive(object): class ArchiveCacheAndHashPassthruChecker(object): """Asserts that the cache and hash file paths are not edited before being sent to the `build` subcommand. """ def __init__( self, monkeypatch: MonkeyPatch, cache_file: str, hash_file: Optional[str], ): from syphon.core.build import build self._syphon_build = build self._monkeypatch: MonkeyPatch = monkeypatch self.cache_file: str = cache_file self.hash_file: Optional[str] = hash_file def __call__(self, *args, **kwargs) -> bool: with self._monkeypatch.context() as m: m.setattr(syphon.core.build, "build", value=self._build_shim) return syphon.archive(*args, **kwargs) def _build_shim(self, *args, **kwargs) -> bool: """Everything is converted to str or None so test cases don't have to worry about using LocalPath. """ # XXX: If the syphon.build argument order changes, # then we need to access a different index! assert ( str(args[0]) == self.cache_file ), f"Cache filepath edited from '{self.cache_file}' to '{args[0]}'" # XXX: If the name of the argument changes, # then we need to access a different key! assert "hash_filepath" in kwargs actual_hash_file = ( None if kwargs["hash_filepath"] is None else str(kwargs["hash_filepath"]) ) assert ( actual_hash_file == self.hash_file ), f"Hash filepath edited from '{self.hash_file}' to '{actual_hash_file}'" return self._syphon_build(*args, **kwargs) @pytest.fixture(scope="function") def archive_fixture( self, monkeypatch: MonkeyPatch, styled_cache_file: str, styled_hash_file: Optional[str], ) -> "TestArchive.ArchiveCacheAndHashPassthruChecker": return TestArchive.ArchiveCacheAndHashPassthruChecker( monkeypatch, styled_cache_file, styled_hash_file ) def test_empty_datafile( self, capsys: CaptureFixture, archive_dir: LocalPath, verbose: bool ): datafile = os.path.join(get_data_path(), "empty.csv") assert not syphon.archive(archive_dir, [datafile], verbose=verbose) assert_captured_outerr(capsys.readouterr(), verbose, False) assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock")) def test_increment_one_to_many_with_metadata_with_schema( self, capsys: CaptureFixture, archive_dir: LocalPath, archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker", schema_file: Optional[LocalPath], verbose: bool, ): # List of (expected frame filename, data filename, metadata filename) tuples targets: List[Tuple[str, str, List[str]]] = [ ( "iris-part-1-of-6-combined.csv", "iris-part-1-of-6.csv", [ "iris-part-1-of-6-meta-part-1-of-2.meta", "iris-part-1-of-6-meta-part-2-of-2.meta", ], ), ( "iris-part-1-2.csv", "iris-part-2-of-6.csv", [ "iris-part-2-of-6-meta-part-1-of-2.meta", "iris-part-2-of-6-meta-part-2-of-2.meta", ], ), ( "iris-part-1-2-3.csv", "iris-part-3-of-6.csv", [ "iris-part-3-of-6-meta-part-1-of-2.meta", "iris-part-3-of-6-meta-part-2-of-2.meta", ], ), ( "iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", [ "iris-part-4-of-6-meta-part-1-of-2.meta", "iris-part-4-of-6-meta-part-2-of-2.meta", ], ), ( "iris-part-1-2-3-4-5.csv", "iris-part-5-of-6.csv", [ "iris-part-5-of-6-meta-part-1-of-2.meta", "iris-part-5-of-6-meta-part-2-of-2.meta", ], ), ( "iris_plus.csv", "iris-part-6-of-6.csv", [ "iris-part-6-of-6-meta-part-1-of-2.meta", "iris-part-6-of-6-meta-part-2-of-2.meta", ], ), ] expected_hashfile = ( LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE) if archive_fixture.hash_file is None else archive_fixture.hash_file ) assert not os.path.exists(expected_hashfile) assert not os.path.exists(archive_fixture.cache_file) assert len(archive_dir.listdir()) == 0 
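        # Each target archives one more slice of the iris data on top of the previous
        # slices, so after every iteration the cached frame read back from cache_file
        # is expected to match the cumulative "combined" CSV named first in the tuple.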
expected_schemafile = ( archive_dir.join(syphon.schema.DEFAULT_FILE) if schema_file is None else schema_file ) assert not os.path.exists(expected_schemafile) syphon.init( SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile ) assert os.path.exists(expected_schemafile) for expected_frame_filename, data_filename, metadata_filenames in targets: assert archive_fixture( archive_dir, [os.path.join(get_data_path(), data_filename)], meta_files=[ os.path.join(get_data_path(), m) for m in metadata_filenames ], filemap_behavior=MappingBehavior.ONE_TO_MANY, schema_filepath=schema_file, cache_filepath=archive_fixture.cache_file, hash_filepath=archive_fixture.hash_file, verbose=verbose, ) assert_captured_outerr(capsys.readouterr(), verbose, False) expected_frame = DataFrame( read_csv( os.path.join(get_data_path(), expected_frame_filename), dtype=str, index_col="Index", ) ) expected_frame.sort_index(inplace=True) actual_frame = DataFrame( read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index") ) actual_frame = actual_frame.reindex(columns=expected_frame.columns) actual_frame.sort_index(inplace=True) assert_captured_outerr(capsys.readouterr(), False, False) assert_frame_equal(expected_frame, actual_frame) assert os.path.exists(expected_hashfile) assert syphon.check( archive_fixture.cache_file, hash_filepath=expected_hashfile, verbose=verbose, ) def test_increment_with_metadata_with_schema( self, capsys: CaptureFixture, archive_dir: LocalPath, archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker", schema_file: Optional[LocalPath], verbose: bool, ): # List of (expected frame filename, data filename, metadata filename) tuples targets: List[Tuple[str, str, str]] = [ ( "iris-part-1-of-6-combined.csv", "iris-part-1-of-6.csv", "iris-part-1-of-6.meta", ), ("iris-part-1-2.csv", "iris-part-2-of-6.csv", "iris-part-2-of-6.meta"), ("iris-part-1-2-3.csv", "iris-part-3-of-6.csv", "iris-part-3-of-6.meta"), ("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", "iris-part-4-of-6.meta"), ( "iris-part-1-2-3-4-5.csv", "iris-part-5-of-6.csv", "iris-part-5-of-6.meta", ), ("iris_plus.csv", "iris-part-6-of-6.csv", "iris-part-6-of-6.meta"), ] expected_hashfile = ( LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE) if archive_fixture.hash_file is None else archive_fixture.hash_file ) assert not os.path.exists(expected_hashfile) assert not os.path.exists(archive_fixture.cache_file) assert len(archive_dir.listdir()) == 0 expected_schemafile = ( archive_dir.join(syphon.schema.DEFAULT_FILE) if schema_file is None else schema_file ) assert not os.path.exists(expected_schemafile) syphon.init( SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile ) assert os.path.exists(expected_schemafile) for expected_frame_filename, data_filename, metadata_filename in targets: assert archive_fixture( archive_dir, [os.path.join(get_data_path(), data_filename)], meta_files=[os.path.join(get_data_path(), metadata_filename)], schema_filepath=schema_file, cache_filepath=archive_fixture.cache_file, hash_filepath=archive_fixture.hash_file, verbose=verbose, ) assert_captured_outerr(capsys.readouterr(), verbose, False) expected_frame = DataFrame( read_csv( os.path.join(get_data_path(), expected_frame_filename), dtype=str, index_col="Index", ) ) expected_frame.sort_index(inplace=True) actual_frame = DataFrame( read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index") ) actual_frame.sort_index(inplace=True) assert_captured_outerr(capsys.readouterr(), False, False) 
assert_frame_equal(expected_frame, actual_frame) assert os.path.exists(expected_hashfile) assert syphon.check( archive_fixture.cache_file, hash_filepath=expected_hashfile, verbose=verbose, ) def test_increment_with_metadata_without_schema( self, capsys: CaptureFixture, archive_dir: LocalPath, archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker", verbose: bool, ): # List of (expected frame filename, data filename, metadata filename) tuples targets: List[Tuple[str, str, str]] = [ ( "iris-part-1-of-6-combined.csv", "iris-part-1-of-6.csv", "iris-part-1-of-6.meta", ), ("iris-part-1-2.csv", "iris-part-2-of-6.csv", "iris-part-2-of-6.meta"), ("iris-part-1-2-3.csv", "iris-part-3-of-6.csv", "iris-part-3-of-6.meta"), ("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv", "iris-part-4-of-6.meta"), ( "iris-part-1-2-3-4-5.csv", "iris-part-5-of-6.csv", "iris-part-5-of-6.meta", ), ("iris_plus.csv", "iris-part-6-of-6.csv", "iris-part-6-of-6.meta"), ] expected_hashfile = ( LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE) if archive_fixture.hash_file is None else archive_fixture.hash_file ) assert not os.path.exists(expected_hashfile) assert not os.path.exists(archive_fixture.cache_file) assert len(archive_dir.listdir()) == 0 for expected_frame_filename, data_filename, metadata_filename in targets: assert archive_fixture( archive_dir, [os.path.join(get_data_path(), data_filename)], meta_files=[os.path.join(get_data_path(), metadata_filename)], cache_filepath=archive_fixture.cache_file, hash_filepath=archive_fixture.hash_file, verbose=verbose, ) assert_captured_outerr(capsys.readouterr(), verbose, False) expected_frame = DataFrame( read_csv( os.path.join(get_data_path(), expected_frame_filename), dtype=str, index_col="Index", ) ) expected_frame.sort_index(inplace=True) actual_frame = DataFrame( read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index") ) actual_frame.sort_index(inplace=True) assert_captured_outerr(capsys.readouterr(), False, False) assert_frame_equal(expected_frame, actual_frame) assert os.path.exists(expected_hashfile) assert syphon.check( archive_fixture.cache_file, hash_filepath=expected_hashfile, verbose=verbose, ) def test_increment_without_metadata_with_schema( self, capsys: CaptureFixture, archive_dir: LocalPath, archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker", schema_file: Optional[LocalPath], verbose: bool, ): # List of (expected frame filename, data filename) tuples targets: List[Tuple[str, str]] = [ ("iris-part-1-of-6-combined.csv", "iris-part-1-of-6-combined.csv"), ("iris-part-1-2.csv", "iris-part-2-of-6-combined.csv"), ("iris-part-1-2-3.csv", "iris-part-3-of-6-combined.csv"), ("iris-part-1-2-3-4.csv", "iris-part-4-of-6-combined.csv"), ("iris-part-1-2-3-4-5.csv", "iris-part-5-of-6-combined.csv"), ("iris_plus.csv", "iris-part-6-of-6-combined.csv"), ] expected_hashfile = ( LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE) if archive_fixture.hash_file is None else archive_fixture.hash_file ) assert not os.path.exists(expected_hashfile) assert not os.path.exists(archive_fixture.cache_file) assert len(archive_dir.listdir()) == 0 expected_schemafile = ( archive_dir.join(syphon.schema.DEFAULT_FILE) if schema_file is None else schema_file ) assert not os.path.exists(expected_schemafile) syphon.init( SortedDict({"0": "PetalColor", "1": "Species"}), expected_schemafile ) assert os.path.exists(expected_schemafile) for expected_frame_filename, data_filename in targets: assert archive_fixture( archive_dir, 
[os.path.join(get_data_path(), data_filename)], schema_filepath=schema_file, cache_filepath=archive_fixture.cache_file, hash_filepath=archive_fixture.hash_file, verbose=verbose, ) assert_captured_outerr(capsys.readouterr(), verbose, False) expected_frame = DataFrame( read_csv( os.path.join(get_data_path(), expected_frame_filename), dtype=str, index_col="Index", ) ) expected_frame.sort_index(inplace=True) actual_frame = DataFrame( read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index") ) actual_frame.sort_index(inplace=True) assert_captured_outerr(capsys.readouterr(), False, False) assert_frame_equal(expected_frame, actual_frame) assert os.path.exists(expected_hashfile) assert syphon.check( archive_fixture.cache_file, hash_filepath=expected_hashfile, verbose=verbose, ) def test_increment_without_metadata_without_schema( self, capsys: CaptureFixture, archive_dir: LocalPath, archive_fixture: "TestArchive.ArchiveCacheAndHashPassthruChecker", schema_file: Optional[LocalPath], verbose: bool, ): # List of (expected frame filename, data filename) tuples targets: List[Tuple[str, str]] = [ ("iris-part-1-of-6-combined.csv", "iris-part-1-of-6.csv"), ("iris-part-1-2.csv", "iris-part-2-of-6.csv"), ("iris-part-1-2-3.csv", "iris-part-3-of-6.csv"), ("iris-part-1-2-3-4.csv", "iris-part-4-of-6.csv"), ("iris-part-1-2-3-4-5.csv", "iris-part-5-of-6.csv"), ("iris_plus.csv", "iris-part-6-of-6.csv"), ] expected_hashfile = ( LocalPath(archive_fixture.cache_file).dirpath(DEFAULT_HASH_FILE) if archive_fixture.hash_file is None else archive_fixture.hash_file ) assert not os.path.exists(expected_hashfile) assert not os.path.exists(archive_fixture.cache_file) assert len(archive_dir.listdir()) == 0 for expected_frame_filename, data_filename in targets: assert archive_fixture( archive_dir, [os.path.join(get_data_path(), data_filename)], cache_filepath=archive_fixture.cache_file, hash_filepath=archive_fixture.hash_file, verbose=verbose, ) assert_captured_outerr(capsys.readouterr(), verbose, False) expected_frame = DataFrame( read_csv( os.path.join(get_data_path(), expected_frame_filename), dtype=str, index_col="Index", ) ) del expected_frame["Species"] del expected_frame["PetalColor"] expected_frame.sort_index(inplace=True) actual_frame = DataFrame( read_csv(str(archive_fixture.cache_file), dtype=str, index_col="Index") ) actual_frame.sort_index(inplace=True) assert_captured_outerr(capsys.readouterr(), False, False) assert_frame_equal(expected_frame, actual_frame) assert os.path.exists(expected_hashfile) assert syphon.check( archive_fixture.cache_file, hash_filepath=expected_hashfile, verbose=verbose, ) def test_no_datafiles( self, capsys: CaptureFixture, archive_dir: LocalPath, verbose: bool ): assert not syphon.archive(archive_dir, [], verbose=verbose) assert_captured_outerr(capsys.readouterr(), verbose, False) def test_without_metadata_with_schema( self, capsys: CaptureFixture, archive_params: Tuple[str, SortedDict], archive_dir: LocalPath, overwrite: bool, verbose: bool, ): filename: str schema: SortedDict filename, schema = archive_params datafile = os.path.join(get_data_path(), filename) schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE) syphon.init(schema, schemafile) expected_df = DataFrame(read_csv(datafile, dtype=str)) expected_df.sort_values(list(expected_df.columns), inplace=True) expected_df.reset_index(drop=True, inplace=True) expected_paths: SortedList = _get_expected_paths( archive_dir, schema, expected_df, filename ) if overwrite: for e in expected_paths: 
os.makedirs(os.path.dirname(e), exist_ok=True) with open(e, mode="w") as fd: fd.write(rand_string()) assert syphon.archive( archive_dir, [datafile], schema_filepath=schemafile, overwrite=overwrite, verbose=verbose, ) assert not os.path.exists(os.path.join(os.path.dirname(datafile), "#lock")) actual_frame = DataFrame() actual_paths = SortedList() for root, _, files in os.walk(archive_dir): for f in files: if ".csv" in f: filepath: str = os.path.join(root, f) actual_paths.add(filepath) actual_frame = concat( [actual_frame, DataFrame(read_csv(filepath, dtype=str))] ) actual_frame.sort_values(list(actual_frame.columns), inplace=True) actual_frame.reset_index(drop=True, inplace=True) assert expected_paths == actual_paths
assert_frame_equal(expected_df, actual_frame)
pandas.testing.assert_frame_equal
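# The tests above repeat one comparison pattern: read the expected and actual CSVs with
# every column forced to str, sort them by index, then compare strictly. A condensed,
# self-contained version of that pattern (the file and index names are placeholders):
import pandas as pd
from pandas.testing import assert_frame_equal

def frames_match(expected_csv: str, actual_csv: str, index_col: str = "Index") -> None:
    expected = pd.read_csv(expected_csv, dtype=str, index_col=index_col)
    actual = pd.read_csv(actual_csv, dtype=str, index_col=index_col)
    # Align column order and row order before the strict equality check.
    actual = actual.reindex(columns=expected.columns)
    assert_frame_equal(expected.sort_index(), actual.sort_index())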
# -*- coding: utf-8 -*- """Copy of final.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1JsZAdNd67Fcn-S5prbt1w33R4wxE_9ep """ # Commented out IPython magic to ensure Python compatibility. import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') from sklearn.preprocessing import LabelEncoder from tqdm import tqdm_notebook as tqdm # %matplotlib inline """## Data loading """ application_train = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/application_train.csv") application_test = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/application_test.csv") # pos_cash = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/POS_CASH_balance.csv") # installments = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/installments_payments.csv") # credit_df = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/credit_card_balance.csv"); # b=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau.csv") # bur=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/bureau_balance.csv") # prev=pd.read_csv("/content/drive/MyDrive/Home Credit/Data/previous_application.csv") print("application_train.shape:",application_train.shape) print("application_test.shape :",application_test.shape) train_id = application_train["SK_ID_CURR"] train_target = application_train["TARGET"] test_id = application_test["SK_ID_CURR"] application_train.head() application_test.head() """we have one extra column in the application_train data , i.e TARGET """ application_train['TARGET'].value_counts() fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["TARGET"].value_counts(),labels = ["TARGET=0","TARGET=1"],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """Imbalanced dataset""" application_train.dtypes.value_counts() obj_type = application_train.dtypes[application_train.dtypes=='object'].index float_type = application_train.dtypes[application_train.dtypes=='float64'].index int_type = application_train.dtypes[application_train.dtypes=='int64'].index def missing_data(data): total = data.isnull().sum().sort_values(ascending = False) percent = (data.isnull().sum()/data.isnull().count()*100).sort_values(ascending = False) return pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) """# Handling categorical data """ print(obj_type) label_list = [] one_hot_list = [] drop_list = [] application_train[obj_type].head() """Missing Values in categorical data""" missing_data(application_train[obj_type]) application_train["CODE_GENDER"].value_counts() application_train['CODE_GENDER'].replace('XNA','F', inplace=True) fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["CODE_GENDER"].value_counts(),labels = ["Female","Male"],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="CODE_GENDER",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """Observation : Male having difficulty in repaying is high compared to Female from the above graph More No of Female Applicants than Male Applicants. 
""" def plot_hist(col): plt.suptitle(col, fontsize=30) application_train.loc[application_train['TARGET'] == 0, col].hist( ) application_train.loc[application_train['TARGET'] == 1, col].hist( ) plt.legend(['TARGET(0)', 'TARGET(1)']) plt.show() plot_hist("CODE_GENDER") """# NAME_CONTRACT_TYPE""" fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["NAME_CONTRACT_TYPE"].value_counts(),labels = ["Cash_Loans","Revolving_Loans"],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="NAME_CONTRACT_TYPE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """Cash Loans are More than Revolving loans . ## FLAG_OWN_CAR """ fig = plt.figure(figsize =(15, 3)) plt.subplot(1,2,1) plt.pie(application_train["FLAG_OWN_CAR"].value_counts(),labels = ["YES","NO"],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="FLAG_OWN_CAR",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## FLAG_OWN_REALTY""" fig = plt.figure(figsize =(15, 3)) plt.subplot(1,2,1) plt.pie(application_train["FLAG_OWN_REALTY"].value_counts(),labels = ["YES","NO"],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="FLAG_OWN_REALTY",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## NAME_EDUCATION_TYPE""" application_train["NAME_EDUCATION_TYPE"].value_counts().index fig = plt.figure(figsize =(15, 3)) plt.subplot(1,2,1) plt.pie(application_train["NAME_EDUCATION_TYPE"].value_counts(),labels =['Secondary / secondary special', 'Higher education', 'Incomplete higher', 'Lower secondary', 'Academic degree'],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="NAME_EDUCATION_TYPE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## NAME_TYPE_SUITE""" application_train["NAME_TYPE_SUITE"].value_counts().index fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["NAME_TYPE_SUITE"].value_counts(),labels =['Unaccompanied', 'Family', 'Spouse, partner', 'Children', 'Other_B', 'Other_A', 'Group of people'],autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="NAME_TYPE_SUITE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## NAME_INCOME_TYPE""" l=application_train["NAME_INCOME_TYPE"].value_counts() l fig = plt.figure(figsize =(20, 5)) plt.subplot(1,2,1) plt.pie(application_train["NAME_INCOME_TYPE"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="NAME_INCOME_TYPE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## NAME_FAMILY_STATUS""" l=application_train["NAME_FAMILY_STATUS"].value_counts() l fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["NAME_FAMILY_STATUS"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="NAME_FAMILY_STATUS",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## NAME_HOUSING_TYPE""" l= application_train["NAME_HOUSING_TYPE"].value_counts() l fig = plt.figure(figsize =(20, 5)) plt.subplot(1,2,1) plt.pie(application_train["NAME_HOUSING_TYPE"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="NAME_HOUSING_TYPE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## OCCUPATION_TYPE""" l=application_train["OCCUPATION_TYPE"].value_counts() l """## WEEKDAY_APPR_PROCESS_START """ l=application_train["WEEKDAY_APPR_PROCESS_START"].value_counts() l fig = 
plt.figure(figsize =(20, 5)) plt.subplot(1,2,1) plt.pie(application_train["WEEKDAY_APPR_PROCESS_START"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="WEEKDAY_APPR_PROCESS_START",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## ORGANIZATION_TYPE""" l=application_train["ORGANIZATION_TYPE"].value_counts() l """## FONDKAPREMONT_MODE""" l=application_train["FONDKAPREMONT_MODE"].value_counts() l fig = plt.figure(figsize =(20, 5)) plt.subplot(1,2,1) plt.pie(application_train["FONDKAPREMONT_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="FONDKAPREMONT_MODE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## HOUSETYPE_MODE""" l=application_train["HOUSETYPE_MODE"].value_counts() l fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["HOUSETYPE_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="HOUSETYPE_MODE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## WALLSMATERIAL_MODE """ l=application_train["WALLSMATERIAL_MODE"].value_counts() l fig = plt.figure(figsize =(15, 5)) plt.subplot(1,2,1) plt.pie(application_train["WALLSMATERIAL_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="WALLSMATERIAL_MODE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## EMERGENCYSTATE_MODE""" l=application_train["EMERGENCYSTATE_MODE"].value_counts() l fig = plt.figure(figsize =(10, 5)) plt.subplot(1,2,1) plt.pie(application_train["EMERGENCYSTATE_MODE"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="EMERGENCYSTATE_MODE",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() obj_type label_list = ['NAME_CONTRACT_TYPE', 'CODE_GENDER', 'FLAG_OWN_CAR','FLAG_OWN_REALTY', 'ORGANIZATION_TYPE'] one_hot_list = ['NAME_TYPE_SUITE', 'NAME_INCOME_TYPE','NAME_EDUCATION_TYPE','NAME_FAMILY_STATUS','NAME_HOUSING_TYPE','OCCUPATION_TYPE', 'WEEKDAY_APPR_PROCESS_START','FONDKAPREMONT_MODE','WALLSMATERIAL_MODE'] drop_list = ["HOUSETYPE_MODE","EMERGENCYSTATE_MODE"] le = LabelEncoder() for x in label_list: le.fit(application_train[x]) application_train[x] = le.transform(application_train[x]) application_test[x] = le.transform(application_test[x]) application_train.drop(drop_list,axis=1,inplace=True) application_test.drop(drop_list,axis=1,inplace=True) train_id = application_train["SK_ID_CURR"] test_id = application_test["SK_ID_CURR"] train_target = application_train["TARGET"] application_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True) application_test.drop(["SK_ID_CURR"],axis=1,inplace=True) print(application_train.shape) print(application_test.shape) obj_type = application_train.dtypes[application_train.dtypes=='object'].index obj_type application_train = pd.get_dummies(application_train,columns=one_hot_list) application_test = pd.get_dummies(application_test,columns=one_hot_list) print(application_train.shape) print(application_test.shape) application_train, application_test = application_train.align(application_test, join ='inner', axis = 1) print(application_train.shape) print(application_test.shape) application_test["SK_ID_CURR"] = test_id application_train["SK_ID_CURR"] = train_id application_train["TARGET"] = train_target """## Handling NUMERICAL DATA""" application_train[int_type].head() for x in int_type: print(x) 
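# Toy illustration (separate from the application data) of why the frames are aligned
# after one-hot encoding: get_dummies only creates columns for the categories present
# in each frame, and align(join='inner', axis=1) keeps the columns both frames share.
# pandas is already imported as pd at the top of this notebook.
toy_train = pd.get_dummies(pd.DataFrame({'color': ['red', 'blue', 'green']}))
toy_test = pd.get_dummies(pd.DataFrame({'color': ['red', 'blue', 'blue']}))
print(toy_train.columns.tolist())   # ['color_blue', 'color_green', 'color_red']
print(toy_test.columns.tolist())    # ['color_blue', 'color_red']
toy_train, toy_test = toy_train.align(toy_test, join='inner', axis=1)
print(toy_train.columns.tolist())   # ['color_blue', 'color_red'] in both frames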
"""## CNT_CHILDREN""" l=application_train['CNT_CHILDREN'].value_counts() l fig = plt.figure(figsize =(10, 5)) plt.subplot(1,2,1) plt.pie(application_train["CNT_CHILDREN"].value_counts(),labels =l.index,autopct='%1.2f%%') plt.subplot(1,2,2) sns.countplot(x="CNT_CHILDREN",hue="TARGET",palette ="Set2",data=application_train) plt.tight_layout() plt.show() """## DAYS_BIRTH Client's age in days at the time of application ,time only relative to the application """ application_train['DAYS_BIRTH'].apply(lambda x : -1*x/365).plot.hist() application_train['DAYS_BIRTH'].apply(lambda x : -1*x/365).describe() """## DAYS_EMPLOYED""" application_train['DAYS_EMPLOYED'].describe() application_train['DAYS_EMPLOYED'].apply(lambda x : -1*x/365).describe() """here we see that max no of days employed showing 1000 years and showing positive . these are outliers.""" application_train['DAYS_EMPLOYED'].apply(lambda x : x/365).plot.hist() """So the DAYS_EMPLOYED greater than the 100 years are considered as outliers , we must Delete the Outliers """ application_train['DAYS_EMPLOYED'].apply(lambda x : x/365).value_counts() """35869 rows has the Days employed value 1000 years ,changing these rows to nan""" application_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) application_test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) application_train['DAYS_EMPLOYED'].plot.hist() """## FLAG_MOBIL : Did client provide mobile phone (1=YES, 0=NO) """ l=application_train['FLAG_MOBIL'].value_counts() l fig = plt.figure(figsize =(8, 3)) plt.subplot(1,2,1) plt.pie(application_train["FLAG_MOBIL"].value_counts(),labels =l.index) # plt.subplot(1,2,2) # sns.countplot(x="CNT_CHILDREN",hue="TARGET",palette ="Set2",data=application_train) # plt.tight_layout() plt.show() application_train['FLAG_MOBIL'].value_counts() application_test['FLAG_MOBIL'].value_counts() """so droping the column FLAG_MOBIL""" application_train.drop(['FLAG_MOBIL'],axis=1,inplace=True) application_test.drop(['FLAG_MOBIL'],axis=1,inplace=True) """## FLAG_DOCUMENT_# The below are the documents may the necessary documents ,They are submitted by all most all, so they do not contribute to any information in prediction """ d=['FLAG_DOCUMENT_2','FLAG_DOCUMENT_4','FLAG_DOCUMENT_6','FLAG_DOCUMENT_7','FLAG_DOCUMENT_9','FLAG_DOCUMENT_10','FLAG_DOCUMENT_11','FLAG_DOCUMENT_12','FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14','FLAG_DOCUMENT_15','FLAG_DOCUMENT_16','FLAG_DOCUMENT_17','FLAG_DOCUMENT_18','FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_20','FLAG_DOCUMENT_21'] #for example : l=application_train['FLAG_DOCUMENT_4'].value_counts() l fig = plt.figure(figsize =(8, 3)) plt.subplot(1,2,1) plt.pie(application_train["FLAG_DOCUMENT_4"].value_counts(),labels =l.index) plt.show() l=application_train['FLAG_DOCUMENT_9'].value_counts() l fig = plt.figure(figsize =(8, 3)) plt.subplot(1,2,1) plt.pie(application_train["FLAG_DOCUMENT_9"].value_counts(),labels =l.index) plt.show() application_train.drop(d,axis=1,inplace=True) application_test.drop(d,axis=1,inplace=True) print(application_train.shape) print(application_test.shape) """## Handling Float""" def plot_kde(col): plt.suptitle(col, fontsize=30) sns.kdeplot(application_train.loc[application_train['TARGET'] == 0, col],label='TARGET=0') sns.kdeplot(application_train.loc[application_train['TARGET'] == 1, col],label='TARGET=1') plt.legend(['TARGET(0)', 'TARGET(1)']) plt.show() def plot_hist(col): plt.suptitle(col, fontsize=30) application_train.loc[application_train['TARGET'] == 0, col].hist( ) 
application_train.loc[application_train['TARGET'] == 1, col].hist( ) plt.legend(['TARGET(0)', 'TARGET(1)']) plt.show() def kde_hist(col): plt.suptitle(col, fontsize=30) fig, ax = plt.subplots(1, 2, figsize=(16, 8)) sns.kdeplot(application_train.loc[application_train['TARGET'] == 0,col], ax=ax[0], label='TARGET(0)') sns.kdeplot(application_train.loc[application_train['TARGET'] == 1,col], ax=ax[0], label='TARGET(1)') ax[0].set_title('KDE plot') ax[1].set_title('Histogram plot') application_train.loc[application_train['TARGET'] == 0, col].hist(ax=ax[1]) application_train.loc[application_train["TARGET"] == 1, col].hist(ax=ax[1]) ax[1].legend(['TARGET(0)', 'TARGET(1)']) plt.show() """## AMT_INCOME_TOTAL""" missing_data(pd.DataFrame(application_train["AMT_INCOME_TOTAL"])) application_train["AMT_INCOME_TOTAL"].describe() plt.figure(figsize=(5,5)) sns.boxplot(application_train["AMT_INCOME_TOTAL"]) kde_hist("AMT_INCOME_TOTAL") """we can observe that higher income does not have any problem in repaying the loan ## AMT_CREDIT : Credit amount of the loan """ missing_data(pd.DataFrame(application_train["AMT_CREDIT"])) plot_kde("AMT_CREDIT") """## AMT_ANNUITY : Loan annuity""" missing_data(pd.DataFrame(application_train["AMT_ANNUITY"])) plot_kde("AMT_ANNUITY") plt.figure(figsize=(5,5)) sns.boxplot(application_train["AMT_ANNUITY"]) """## OWN_CAR_AGE :Age of client's car""" missing_data(pd.DataFrame(application_train["OWN_CAR_AGE"])) application_train["OWN_CAR_AGE"].describe() plot_kde("OWN_CAR_AGE") """considering nan value means no car ,so no age so filling with zero.""" application_train["OWN_CAR_AGE"].fillna(0,inplace=True) application_test["OWN_CAR_AGE"].fillna(0,inplace=True) """## CNT_FAM_MEMBERS : How many family members does client have""" missing_data(pd.DataFrame(application_train["CNT_FAM_MEMBERS"])) application_train["CNT_FAM_MEMBERS"].value_counts() plot_hist("CNT_FAM_MEMBERS") """we can observer that more the family members difficulty in paying loan . 
## EXT_SOURCE_1 ,EXT_SOURCE_2,EXT_SOURCE_3 : Normalized score from external data source """ missing_data(pd.DataFrame(application_train[["EXT_SOURCE_1","EXT_SOURCE_2","EXT_SOURCE_3"]])) # #droping EXT_SOURCE_1 # application_train.drop(["EXT_SOURCE_1"],axis=1,inplace=True) # application_test.drop(["EXT_SOURCE_1"],axis=1,inplace=True) kde_hist("EXT_SOURCE_2") kde_hist("EXT_SOURCE_3") """## TOTALAREA_MODE""" missing_data(pd.DataFrame(application_train["TOTALAREA_MODE"])) application_train["TOTALAREA_MODE"].describe() application_train["TOTALAREA_MODE"].fillna(0,inplace=True) application_test["TOTALAREA_MODE"].fillna(0,inplace=True) kde_hist("TOTALAREA_MODE") """## OBS_30_CNT_SOCIAL_CIRCLE ,OBS_60 _CNT_SOCIAL_CIRCLE How many observation of client's social surroundings with observable 30 DPD (days past due) default How many observation of client's social surroundings with observable 60 DPD (days past due) default """ missing_data(pd.DataFrame(application_train[["OBS_30_CNT_SOCIAL_CIRCLE","OBS_60_CNT_SOCIAL_CIRCLE"]])) application_train["OBS_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_test["OBS_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_train["OBS_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_test["OBS_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_train["OBS_30_CNT_SOCIAL_CIRCLE"].value_counts() sns.boxplot(application_train["OBS_30_CNT_SOCIAL_CIRCLE"]) application_train["OBS_30_CNT_SOCIAL_CIRCLE"] = application_train["OBS_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x) application_test["OBS_30_CNT_SOCIAL_CIRCLE"]= application_test["OBS_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x) application_train["OBS_60_CNT_SOCIAL_CIRCLE"] = application_train["OBS_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x) application_test["OBS_60_CNT_SOCIAL_CIRCLE"]= application_test["OBS_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 25 if x>25 else x) sns.boxplot(application_train["OBS_60_CNT_SOCIAL_CIRCLE"]) """## DEF_30_CNT_SOCIAL_CIRCLE, DEF_60_CNT_SOCIAL_CIRCLE, How many observation of client's social surroundings defaulted on 30 (days past due) DPD How many observation of client's social surroundings defaulted on 60 (days past due) DPD """ missing_data(pd.DataFrame(application_train[["DEF_30_CNT_SOCIAL_CIRCLE","DEF_60_CNT_SOCIAL_CIRCLE"]])) application_train["DEF_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_test["DEF_30_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_train["DEF_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_test["DEF_60_CNT_SOCIAL_CIRCLE"].fillna(0,inplace=True) application_train["DEF_30_CNT_SOCIAL_CIRCLE"].value_counts() application_train["DEF_60_CNT_SOCIAL_CIRCLE"].value_counts() sns.boxplot(application_train["DEF_30_CNT_SOCIAL_CIRCLE"]) application_train["DEF_30_CNT_SOCIAL_CIRCLE"] = application_train["DEF_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x) application_test["DEF_30_CNT_SOCIAL_CIRCLE"]= application_test["DEF_30_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x) application_train["DEF_60_CNT_SOCIAL_CIRCLE"] = application_train["DEF_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x) application_test["DEF_60_CNT_SOCIAL_CIRCLE"]= application_test["DEF_60_CNT_SOCIAL_CIRCLE"].apply(lambda x: 5 if x>4 else x) """## DAYS_LAST_PHONE_CHANGE : How many days before application did client change phone """ missing_data(pd.DataFrame(application_train["DAYS_LAST_PHONE_CHANGE"])) application_train["DAYS_LAST_PHONE_CHANGE"].describe() 
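# The social-circle counts above are capped with apply(lambda x: 25 if x > 25 else x);
# pandas' clip() expresses the same winsorisation in vectorised form. clip returns a
# new Series, so the columns prepared above are left untouched by this check.
capped = application_train["OBS_30_CNT_SOCIAL_CIRCLE"].clip(upper=25)
assert (capped <= 25).all()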
application_train["DAYS_LAST_PHONE_CHANGE"]=application_train["DAYS_LAST_PHONE_CHANGE"].apply(lambda x: x*-1) application_test["DAYS_LAST_PHONE_CHANGE"]=application_test["DAYS_LAST_PHONE_CHANGE"].apply(lambda x: x*-1) kde_hist("DAYS_LAST_PHONE_CHANGE") """## 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_WEEK', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR' Number of enquiries to Credit Bureau about the client _____ hour before application """ AMT_REQ = ['AMT_REQ_CREDIT_BUREAU_HOUR','AMT_REQ_CREDIT_BUREAU_DAY','AMT_REQ_CREDIT_BUREAU_WEEK','AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR'] missing_data(application_train[AMT_REQ]) application_train[AMT_REQ]=application_train[AMT_REQ].fillna(0) application_test[AMT_REQ]=application_train[AMT_REQ].fillna(0) AVG = [ 'APARTMENTS_AVG', 'BASEMENTAREA_AVG', 'YEARS_BEGINEXPLUATATION_AVG','YEARS_BUILD_AVG', 'COMMONAREA_AVG', 'ELEVATORS_AVG', 'ENTRANCES_AVG', 'FLOORSMAX_AVG', 'FLOORSMIN_AVG', 'LANDAREA_AVG', 'LIVINGAPARTMENTS_AVG', 'LIVINGAREA_AVG', 'NONLIVINGAPARTMENTS_AVG','NONLIVINGAREA_AVG'] MODE = ['APARTMENTS_MODE','BASEMENTAREA_MODE', 'YEARS_BEGINEXPLUATATION_MODE', 'YEARS_BUILD_MODE','COMMONAREA_MODE','ELEVATORS_MODE', 'ENTRANCES_MODE', 'FLOORSMAX_MODE', 'FLOORSMIN_MODE', 'LANDAREA_MODE', 'LIVINGAPARTMENTS_MODE', 'LIVINGAREA_MODE', 'NONLIVINGAPARTMENTS_MODE', 'NONLIVINGAREA_MODE'] MEDI = ['APARTMENTS_MEDI','BASEMENTAREA_MEDI','YEARS_BEGINEXPLUATATION_MEDI','YEARS_BUILD_MEDI','COMMONAREA_MEDI','ELEVATORS_MEDI','ENTRANCES_MEDI','FLOORSMAX_MEDI', 'FLOORSMIN_MEDI','LANDAREA_MEDI','LIVINGAPARTMENTS_MEDI','LIVINGAREA_MEDI','NONLIVINGAPARTMENTS_MEDI','NONLIVINGAREA_MEDI'] missing_data(application_train[AVG]) missing_data(application_train[MODE]) missing_data(application_train[MEDI]) obj_type = application_train.dtypes[application_train.dtypes=='object'].index float_type = application_train.dtypes[application_train.dtypes=='float64'].index int_type = application_train.dtypes[application_train.dtypes=='int64'].index missing_data(application_train[float_type]).head(50) # application_train.to_csv("/content/drive/MyDrive/Home Credit/preprocessed_data/app_train.csv",index=False) # application_test.to_csv("/content/drive/MyDrive/Home Credit/preprocessed_data/app_test.csv",index=False) """## CREDIT_CARD""" credit_df = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/credit_card_balance.csv"); credit_features_train = application_train[["SK_ID_CURR","TARGET"]] credit_features_test =pd.DataFrame(application_test["SK_ID_CURR"]) credit_df.dtypes.value_counts() def plot_kde_2(col): plt.suptitle(col, fontsize=30) sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 0, col],label='TARGET=0') sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 1, col],label='TARGET=1') plt.legend(['TARGET(0)', 'TARGET(1)']) plt.show() def plot_hist_2(col): plt.suptitle(col, fontsize=30) credit_features_train.loc[credit_features_train['TARGET'] == 0, col].hist( ) credit_features_train.loc[credit_features_train['TARGET'] == 1, col].hist( ) plt.legend(['TARGET(0)', 'TARGET(1)']) plt.show() def kde_hist_2(col): plt.suptitle(col, fontsize=30) fig, ax = plt.subplots(1, 2, figsize=(16, 8)) sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 0,col], ax=ax[0], label='TARGET(0)') sns.kdeplot(credit_features_train.loc[credit_features_train['TARGET'] == 1,col], ax=ax[0], label='TARGET(1)') ax[0].set_title('KDE plot') 
ax[1].set_title('Histogram plot') credit_features_train.loc[credit_features_train['TARGET'] == 0, col].hist(ax=ax[1]) credit_features_train.loc[credit_features_train["TARGET"] == 1, col].hist(ax=ax[1]) ax[1].legend(['TARGET(0)', 'TARGET(1)']) plt.show() def plot_count_2(col): sns.countplot(x=col, data=credit_features_train) missing_data(credit_df).head(23) obj_type = credit_df.dtypes[credit_df.dtypes=='object'].index float_type = credit_df.dtypes[credit_df.dtypes=='float64'].index int_type = credit_df.dtypes[credit_df.dtypes=='int64'].index credit_df[obj_type].head() """## NO of previous loans per coutomer""" NO_LOANS = credit_df.groupby(by = ['SK_ID_CURR'])['SK_ID_PREV'].nunique().reset_index().rename(index = str, columns = {'SK_ID_PREV': 'NO_LOANS'}) NO_LOANS["NO_LOANS"].value_counts() credit_features_train = credit_features_train.merge(NO_LOANS,on=["SK_ID_CURR"],how="left") credit_features_test = credit_features_test.merge(NO_LOANS,on=["SK_ID_CURR"],how="left") print(credit_features_train.shape) print(credit_features_test.shape) credit_features_train["NO_LOANS"].value_counts() credit_features_train["NO_LOANS"].fillna(0,inplace=True) credit_features_test["NO_LOANS"].fillna(0,inplace=True) fig = plt.figure(figsize =(15, 3)) plt.subplot(1,2,1) plt.pie(credit_features_train["NO_LOANS"].value_counts(),colors=['C1','C2','C3','C4','C5'], labels = ["NO_LOANS=0","NO_LOANS=1","NO_LOANS=2","NO_LOANS=3","NO_LOANS=4"]) plt.subplot(1,2,2) sns.countplot(x="NO_LOANS",hue="TARGET",palette = "Set2",data=credit_features_train) plt.tight_layout() plt.show() credit_df[int_type].head() missing_data(credit_df[int_type]) AVG_DPD = credit_df.groupby(by= ['SK_ID_CURR'])['SK_DPD'].mean().reset_index().rename(index = str, columns = {'SK_DPD': 'AVG_DPD'}) credit_features_train = credit_features_train.merge(AVG_DPD, on = ['SK_ID_CURR'], how = 'left') credit_features_test = credit_features_test.merge(AVG_DPD, on = ['SK_ID_CURR'], how = 'left') print(credit_features_train.shape) print(credit_features_test.shape) credit_features_test.head() credit_features_train.fillna(0,inplace=True) credit_features_test.fillna(0,inplace=True) missing_data(credit_df[float_type]) credit_df[float_type].head() """## NO OF INSTALMENTS PAID BY CUSTOMER PER LOAN CNT_INSTALMENT_MATURE_CUM : Gives Number of paid installments on the previous credit. 
""" grp = credit_df.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT_MATURE_CUM'].max().reset_index().rename(index = str, columns = {'CNT_INSTALMENT_MATURE_CUM': 'NO_INSTALMENTS'}) grp1 = grp.groupby(by = ['SK_ID_CURR'])['NO_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'NO_INSTALMENTS': 'TOTAL_INSTALMENTS'}) credit_features_train = credit_features_train.merge(grp1,on = ['SK_ID_CURR'], how = 'left') credit_features_test = credit_features_test.merge(grp1,on=['SK_ID_CURR'],how='left') credit_features_train.fillna(0,inplace=True) credit_features_test.fillna(0,inplace=True) print(credit_features_train.shape) print(credit_features_test.shape) credit_features_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True) credit_features_test.drop(["SK_ID_CURR"],axis=1,inplace=True) application_train=pd.concat([application_train, credit_features_train], axis=1) application_test=pd.concat([application_test, credit_features_test],axis=1) del credit_df del credit_features_test del credit_features_train """## installments""" installments = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/installments_payments.csv") installment_train = application_train[["SK_ID_CURR","TARGET"]] installment_test =pd.DataFrame(application_test["SK_ID_CURR"]) installments.shape missing_data(installments) installments.fillna(0,inplace=True) installments['Days_Extra_Taken']=installments['DAYS_INSTALMENT']-installments['DAYS_ENTRY_PAYMENT'] installments['AMT_INSTALMENT_difference']=installments['AMT_INSTALMENT']-installments['AMT_PAYMENT'] installments.drop(["DAYS_INSTALMENT","AMT_INSTALMENT"],axis=1,inplace=True) installments.drop(["DAYS_ENTRY_PAYMENT","AMT_PAYMENT"],axis=1,inplace=True) temp = installments.drop(["NUM_INSTALMENT_VERSION","NUM_INSTALMENT_NUMBER"],axis=1) grp = temp.groupby(["SK_ID_CURR"])["Days_Extra_Taken","AMT_INSTALMENT_difference"].max().reset_index().rename(index = str, columns = {"Days_Extra_Taken": 'MAX_Days_Extra_Taken',"AMT_INSTALMENT_difference":"MAX_AMT_INSTALMENT_difference"}) grp["MAX_Days_Extra_Taken"].describe() installment_train = installment_train.merge(grp,on=["SK_ID_CURR"],how="left") installment_test = installment_test.merge(grp,on=["SK_ID_CURR"],how="left") del temp del grp del grp1 print(installment_test.shape) print(installment_train.shape) installment_test.fillna(0,inplace=True) installment_train.fillna(0,inplace=True) missing_data(installment_train) installment_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True) installment_test.drop(["SK_ID_CURR"],axis=1,inplace=True) application_train=pd.concat([application_train, installment_train],axis=1) application_test=pd.concat([application_test, installment_test], axis=1) print(application_train.shape) print(application_test.shape) del installment_test del installment_train del installments """##POS_CASH :""" pos_cash = pd.read_csv("/content/drive/MyDrive/Home Credit/Data/POS_CASH_balance.csv") pos_train = application_train[["SK_ID_CURR","TARGET"]] pos_test =pd.DataFrame(application_test["SK_ID_CURR"]) temp1 = pos_cash.groupby(by= ['SK_ID_CURR'])['SK_DPD'].mean().reset_index().rename(index = str, columns = {'SK_DPD': 'AVG_DPD'}) temp2 = pos_cash.groupby(by= ['SK_ID_CURR'])['SK_DPD_DEF'].mean().reset_index().rename(index = str, columns = {'SK_DPD_DEF': 'AVG_DPD_DEF'}) pos_train = pos_train.merge(temp1, on = ['SK_ID_CURR'], how = 'left') pos_test = pos_test.merge(temp1, on = ['SK_ID_CURR'], how = 'left') pos_train = pos_train.merge(temp2, on = ['SK_ID_CURR'], how = 'left') pos_test = pos_test.merge(temp2, on = 
['SK_ID_CURR'], how = 'left') del temp1 del temp2 """# CNT_INSTALMENT_MATURE_CUM : Gives Number of paid installments on the previous credit.""" grp = pos_cash.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT'].max().reset_index().rename(index = str, columns = {'CNT_INSTALMENT': 'POS_PAID_INSTALMENTS'}) grp1 = grp.groupby(by = ['SK_ID_CURR'])['POS_PAID_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'POS_PAID_INSTALMENTS': 'POS_TOTAL_PAID_INSTALMENTS'}) grp2 = pos_cash.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT_FUTURE'].min().reset_index().rename(index = str, columns = {'CNT_INSTALMENT_FUTURE': 'POS_NOTPAID_INSTALMENTS'}) grp3 = grp2.groupby(by = ['SK_ID_CURR'])['POS_NOTPAID_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'POS_NOTPAID_INSTALMENTS': 'POS_TOTAL_NOTPAID_INSTALMENTS'}) pos_train = pos_train.merge(grp1, on = ['SK_ID_CURR'], how = 'left') pos_test = pos_test.merge(grp1, on = ['SK_ID_CURR'], how = 'left') pos_train = pos_train.merge(grp3, on = ['SK_ID_CURR'], how = 'left') pos_test = pos_test.merge(grp3, on = ['SK_ID_CURR'], how = 'left') del grp1 del grp2 del grp3 del grp POS_NO_LOANS = pos_cash.groupby(by = ['SK_ID_CURR'])['SK_ID_PREV'].nunique().reset_index().rename(index = str, columns = {'SK_ID_PREV': 'NO_LOANS'}) pos_train = pos_train.merge(POS_NO_LOANS, on = ['SK_ID_CURR'], how = 'left') pos_test = pos_test.merge(POS_NO_LOANS, on = ['SK_ID_CURR'], how = 'left') del POS_NO_LOANS print(pos_train.shape) print(pos_test.shape) pos_train.fillna(0,inplace=True) pos_test.fillna(0,inplace=True) pos_train.drop(["SK_ID_CURR","TARGET"],axis=1,inplace=True) pos_test.drop(["SK_ID_CURR"],axis=1,inplace=True) application_train=pd.concat([application_train, pos_train],axis=1) application_test=
pd.concat([application_test, pos_test], axis=1)
pandas.concat
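The row above completes the feature-merge step with pandas.concat. Below is a minimal, self-contained sketch (toy frames, hypothetical column names — not the real Home Credit data) of the same column-wise pattern the notebook uses to attach engineered features to the application table.

import pandas as pd

# Toy stand-ins for application_test and the engineered POS features (pos_test).
app = pd.DataFrame({"SK_ID_CURR": [1, 2, 3], "AMT_CREDIT": [100.0, 200.0, 300.0]})
pos_feats = pd.DataFrame({"AVG_DPD": [0.0, 1.5, 0.2], "NO_LOANS": [1, 2, 1]})

# axis=1 glues the frames side by side, aligning on the row index; both frames here
# share the default RangeIndex, so rows stay matched. Mismatched index labels would
# instead produce NaN-padded rows.
combined = pd.concat([app, pos_feats], axis=1)
print(combined.shape)  # (3, 4)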
import re import pylatex import dash import dash_table import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State import sklearn.metrics as sm import pandas as pd import scipy.stats as sps from scipy.sparse import issparse from sklearn.feature_selection import chi2 import plotly.graph_objects as go import plotly.express as px from .text.linear_text import * from .text.roc_text import * from .DashExceptions import ModelChoiceException from .Dashboard import Dashboard from ..models.LinearRegressionModel import * from ..models.LogisticRegressionModel import * class PredictionDashboard(Dashboard): def _generate_layout(self): if self.settings['model'] == 'linreg': return LinearRegressionDashboard(self).get_layout() elif self.settings['model'] == 'logreg': return LogisticRegressionDashboard(self).get_layout() elif self.settings['model'] == 'roc': return ROC(self).get_layout() elif self.settings['model'] == 'polynomreg': return PolynomRegressionDashboard(self).get_layout() else: raise ModelChoiceException class LinearRegressionDashboard(Dashboard): def __init__(self, predict: PredictionDashboard): Dashboard.__init__(self) self.predict = predict self.coord_list = [] def get_layout(self): return self._generate_layout() def _generate_layout(self): metrics_list = [] metrics_method = { 'model_quality': self._generate_quality(), 'signif': self._generate_signif(), 'resid': self._generate_resid(), 'equation': self._generate_equation(), 'distrib_resid': self._generate_distrib() } for metric in metrics_method: if metric in self.predict.settings['metrics']: metrics_list.append(metrics_method[metric]) # for metrics in self.predict.settings['metrics']: # metrics_list.append(metrics_method[metrics]) return html.Div([ html.Div(html.H1(children='Множественная регрессия'), style={'text-align': 'center'}), html.Div(metrics_list)]) # графики def _generate_distrib(self): df_Y = self.predict.df_Y_test predict_Y = LinearRegressionModel.predict( self.predict.model, self.predict.df_X_test) # График распределения остатков fig_rasp_2 = go.Figure() df_ost_2 = pd.DataFrame( {'Изначальный Y': df_Y, 'Предсказанный Y': predict_Y}) fig_rasp_2 = px.scatter(df_ost_2, x="Изначальный Y", y="Предсказанный Y", trendline="ols", trendline_color_override='red', labels='Данные') fig_rasp_2.update_traces(marker_size=20) fig = go.Figure( data=go.Histogram( x=df_Y - predict_Y, name='Остатки' ) ) fig.add_trace( go.Histogram( x=np.random.normal(0, 1, len(df_Y)), name='Нормальное распределение' ) ) fig.update_xaxes(title='Остатки') fig.update_layout(bargap=0.1) # специфичность residuals = df_Y - predict_Y num_divisions = residuals.shape[0] + 1 quantiles = np.arange(1, residuals.shape[0]) / num_divisions qq_x_data = sps.norm.ppf(quantiles) qq_y_data = np.sort(residuals) line_x0 = sps.norm.ppf(0.25) line_x1 = sps.norm.ppf(0.75) line_y0 = np.quantile(residuals, 0.25) line_y1 = np.quantile(residuals, 0.75) slope = (line_y1 - line_y0) / (line_x1 - line_x0) line_intercept = line_y1 - (slope * line_x1) x_range_line = np.arange(-3, 3, 0.001) y_values_line = (slope * x_range_line) + line_intercept fig_qqplot = go.Figure() fig_qqplot.add_trace( go.Scatter( x=qq_x_data, y=qq_y_data, mode='markers', marker={'color': 'blue'}, name='Остатки') ) fig_qqplot.add_trace( go.Scatter( x=x_range_line, y=y_values_line, mode='lines', marker={'color': 'red'}, name='Нормальное распределение')) fig_qqplot['layout'].update( xaxis={ 'title': 'Теоретические квантили', 'zeroline': True}, yaxis={ 'title': 
'Экспериментальные квантили'}, showlegend=True, ) return html.Div([html.Div(html.H2(children='Графики остатков'), style={'text-align': 'center'}), html.Div([ html.Div( html.H4(children='Гистограмма распределения остатков'), style={'text-align': 'center'}), html.Div(dcc.Graph(id='Graph_ost_1', figure=fig), style={'text-align': 'center', 'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid'}), ], style={'margin': '50px'}), html.Div([ html.Div( html.H4(children='График соответствия предсказанных значений зависимой переменной ' 'и исходных значений'), style={'text-align': 'center'}), html.Div(dcc.Graph(id='Graph_ost_2', figure=fig_rasp_2), style={'text-align': 'center', 'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid'}), html.Div(dcc.Markdown(markdown_graph)) ], style={'margin': '50px'}), html.Div([ html.Div( html.H4(children='График квантиль-квантиль'), style={'text-align': 'center'}), html.Div(dcc.Graph(id='graph_qqplot', figure=fig_qqplot), style={'text-align': 'center', 'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid'}), ], style={'margin': '50px'}), ], style={'margin': '50px'}) # уравнение def _generate_equation(self): names = self.predict.settings['x'] name_Y = self.predict.settings['y'] b = self.predict.model.get_all_coef() uravnenie = LinearRegressionModel.uravnenie( self.predict.model, b, names, name_Y) df_X = self.predict.df_X_test b = self.predict.model.get_all_coef() def update_output(n_clicks, input1): number = len(self.coord_list) if n_clicks == 0 or input1 == 'Да': self.coord_list = [] number = len(self.coord_list) return u'''Введите значение параметра "{}"'''.format(df_X.columns[0]) if re.fullmatch(r'^([-+])?\d+([,.]\d+)?$', input1): number += 1 if input1.find(',') > 0: input1 = float(input1[0:input1.find( ',')] + '.' 
+ input1[input1.find(',') + 1:len(input1)]) self.coord_list.append(float(input1)) if len(self.coord_list) < len(df_X.columns): return u'''Введите значение параметра "{}".'''.format(df_X.columns[number]) # максимальное значение - len(df_X.columns)-1 if len(self.coord_list) == len(df_X.columns): number = -1 yzn = b[0] for i in range(len(self.coord_list)): yzn += self.coord_list[i] * b[i + 1] return u'''Предсказанное значение равно {} \n Если желаете посчитать ещё для одного набор признаков , напишите "Да".'''.format(round(yzn, 3)) elif n_clicks > 0: return u'''Введено не число, введите значение параметра "{}" повторно.'''.format(df_X.columns[number]) if number == -1 and input1 != 0 and input1 != 'Да' and input1 != '0': return u'''Если желаете посчитать ещё для {} набор признаков, напишите "Да".'''.format('одного') self.predict.app.callback(dash.dependencies.Output('output-state', 'children'), [dash.dependencies.Input( 'submit-button-state', 'n_clicks')], [dash.dependencies.State('input-1-state', 'value')])(update_output) return html.Div([html.Div(html.H2(children='Уравнение множественной регрессии'), style={'text-align': 'center'}), html.Div([html.Div(dcc.Markdown(id='Markdown', children=uravnenie)), html.Div(html.H4(children='Предсказание новых значений'), style={'text-align': 'center'}), dcc.Markdown(children='Чтобы получить значение зависимой переменной, ' 'введите значение независимых признаков ниже:'), dcc.Input(id='input-1-state', type='text', value=''), html.Button(id='submit-button-state', n_clicks=0, children='Submit'), html.Div(id='output-state', children='')], style={'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid', 'padding': '5px'}) ], style={'margin': '50px'}) # качество модели def _generate_quality(self): df_result_1 = pd.DataFrame( columns=['Параметр', 'R', 'R2', 'R2adj', 'df', 'Fst', 'St.Error']) df_Y = self.predict.df_Y_test df_X = self.predict.df_X_test predict_Y = LinearRegressionModel.predict( self.predict.model, self.predict.df_X_test) mean_Y = LinearRegressionModel.get_mean(self.predict.model, df_Y) RSS = LinearRegressionModel.get_RSS( self.predict.model, predict_Y, mean_Y) de_fr = LinearRegressionModel.get_deg_fr( self.predict.model, self.predict.df_X_test) df_result_1.loc[1] = ['Значение', round(LinearRegressionModel.get_R(self.predict.model, df_Y, predict_Y), 3), round(LinearRegressionModel.score( self.predict.model), 3), round(LinearRegressionModel.get_R2_adj( self.predict.model, df_X, df_Y, predict_Y), 3), str(str(LinearRegressionModel.get_deg_fr(self.predict.model, df_X)[0]) + '; ' + str(LinearRegressionModel.get_deg_fr(self.predict.model, df_X)[1])), round(LinearRegressionModel.get_Fst( self.predict.model, df_X, df_Y, predict_Y), 3), round(LinearRegressionModel.get_st_err( self.predict.model, RSS, de_fr), 3) ] return html.Div([html.Div(html.H2(children='Критерии качества модели'), style={'text-align': 'center'}), html.Div([html.Div(dash_table.DataTable( id='table1', columns=[{"name": i, "id": i} for i in df_result_1.columns], data=df_result_1.to_dict('records'), export_format='xlsx' ), style={'width': str(len(df_result_1.columns) * 8 - 10) + '%', 'display': 'inline-block'}), html.Div(dcc.Markdown(markdown_linear_table1))], style={'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid', 'padding': '5px'})], style={'margin': '50px'}) # таблица остатков def _generate_resid(self): df_Y = self.predict.df_Y_test predict_Y = LinearRegressionModel.predict( 
self.predict.model, self.predict.df_X_test) mean_Y = sum(df_Y) / len(df_Y) TSS = LinearRegressionModel.get_TSS( self.predict.model, df_Y.tolist(), mean_Y) ESS = LinearRegressionModel.get_ESS( self.predict.model, df_Y.tolist(), predict_Y) RSS = LinearRegressionModel.get_RSS( self.predict.model, predict_Y, mean_Y) de_fr = LinearRegressionModel.get_deg_fr( self.predict.model, self.predict.df_X_test) d_1 = df_Y # зависимый признак d_2 = predict_Y # предсказанное значение d_3 = df_Y - predict_Y # остатки # стандартизированные предсказанные значения d_4 = (predict_Y - mean_Y) / ((TSS / len(predict_Y)) ** 0.5) d_5 = (df_Y - predict_Y) / ((ESS / len(df_Y)) ** 0.5) d_6 = df_Y * 0 + \ ((LinearRegressionModel.get_st_err( self.predict.model, RSS, de_fr) / len(df_Y)) ** 0.5) mean_list = [] # средние значения для каждого признака for i in range(self.predict.df_X_test.shape[1]): a = self.predict.df_X_test.iloc[:, i] mean_list.append( LinearRegressionModel.get_mean(self.predict.model, a)) mah_df = [] # тут будут расстояния Махалонобиса для всех наблюдений cov_mat_2 = LinearRegressionModel.get_cov_matrix_2(self.predict.model, self.predict.df_X_test) # ков. матрица без единичного столбца for j in range(self.predict.df_X_test.shape[0]): aa = self.predict.df_X_test.iloc[j, :] # строка с признаками meann = [] # список отличий от среднего for i in range(self.predict.df_X_test.shape[1]): meann.append(mean_list[i] - aa[i]) # расстояние для наблюдения mah_df.append( np.dot(np.dot(np.transpose(meann), cov_mat_2), meann)) df_result_3 = pd.DataFrame({'Номер наблюдения': 0, 'Исходное значение признака': np.round(d_1, 3), 'Рассчитанное значение признака': np.round(d_2, 3), 'Остатки': np.round(d_3, 3), 'Стандартные предсказанные значения': np.round(d_4, 3), 'Стандартизированные остатки': np.round(d_5, 3), 'Стандартная ошибка предсказанного значения': np.round(d_6, 3), 'Расстояние Махаланобиса': np.round(mah_df, 3)}) df_result_3.iloc[:, 0] = [i + 1 for i in range(df_result_3.shape[0])] return html.Div([html.Div(html.H2(children='Таблица остатков'), style={'text-align': 'center'}), html.Div([html.Div(dash_table.DataTable( id='table3', data=df_result_3.to_dict('records'), columns=[{"name": i, "id": i} for i in df_result_3.columns], # tooltip_header={i: i for i in df.columns}, # # либо этот, либо тот что ниже tooltip={i: { 'value': i, 'use_with': 'both' } for i in df_result_3.columns}, style_header={ 'textDecoration': 'underline', 'textDecorationStyle': 'dotted', }, style_cell={ 'overflow': 'hidden', 'textOverflow': 'ellipsis', 'maxWidth': 0, # len(df_result_3.columns)*5, }, # asdf page_size=20, fixed_rows={'headers': True}, style_table={'height': '330px', 'overflowY': 'auto'}, tooltip_delay=0, tooltip_duration=None, export_format='xlsx' ), style={'width': str(len(df_result_3.columns) * 8 - 10) + '%', 'display': 'inline-block'}), html.Div(dcc.Markdown(markdown_linear_table3))], style={'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid', 'padding': '5px'}) ], style={'margin': '50px'}) # таблица критериев значимости переменных def _generate_signif(self): df_Y = self.predict.df_Y_test predict_Y = LinearRegressionModel.predict( self.predict.model, self.predict.df_X_test) mean_Y = LinearRegressionModel.get_mean( self.predict.model, df_Y.tolist()) TSS = LinearRegressionModel.get_TSS( self.predict.model, df_Y.tolist(), mean_Y) de_fr = LinearRegressionModel.get_deg_fr( self.predict.model, self.predict.df_X_test) b = self.predict.model.get_all_coef() df_column = 
list(self.predict.df_X_test.columns) df_column.insert(0, 'Параметр') df_result_2 = pd.DataFrame(columns=df_column) t_st = LinearRegressionModel.t_stat(self.predict.model, self.predict.df_X_test, self.predict.df_Y_test, predict_Y, de_fr, b) cov_mat = LinearRegressionModel.get_cov_matrix( self.predict.model, self.predict.df_X_test) st_er_coef = LinearRegressionModel.st_er_coef( self.predict.model, self.predict.df_Y_test, predict_Y, cov_mat) p_values = LinearRegressionModel.p_values( self.predict.model, self.predict.df_X_test, t_st) b_st = LinearRegressionModel.st_coef( self.predict.model, self.predict.df_X_test, TSS, b) res_b = [] list_b = list(b) for j in range(1, len(list_b)): res_b.append(round(list_b[j], 3)) res_bst = [] list_bst = list(b_st) for j in range(len(list_bst)): res_bst.append(round(list_bst[j], 3)) res_errb = [] st_er_b = list(st_er_coef) for j in range(1, len(st_er_b)): res_errb.append(round(st_er_b[j], 3)) res_tst = [] for j in range(1, len(t_st)): res_tst.append(round(t_st[j], 3)) res_pval = [] for j in range(1, len(t_st)): res_pval.append(round(p_values[j], 3)) df_result_2 = pd.DataFrame({'Название переменной': self.predict.df_X.columns.tolist(), 'b': res_b, 'b_st': res_bst, 'St.Error b': res_errb, 't-критерий': res_tst, 'p-value': res_pval}) return html.Div([html.Div(html.H2(children='Критерии значимости переменных'), style={'text-align': 'center'}), html.Div([html.Div(dash_table.DataTable( id='table2', columns=[{"name": i, "id": i} for i in df_result_2.columns], data=df_result_2.to_dict('records'), style_table={'textOverflowX': 'ellipsis', }, tooltip={i: { 'value': i, 'use_with': 'both' } for i in df_result_2.columns}, tooltip_data=[ { column: {'value': str(value), 'type': 'markdown'} for column, value in row.items() } for row in df_result_2.to_dict('records') ], style_header={ 'textDecoration': 'underline', 'textDecorationStyle': 'dotted', }, style_cell={ 'overflow': 'hidden', 'textOverflow': 'ellipsis', 'maxWidth': 0, # len(df_result_3.columns)*5, }, export_format='xlsx' ), style={'width': str(len(df_result_2.columns) * 6) + '%', 'display': 'inline-block'}), html.Div(dcc.Markdown(markdown_linear_table2))], # style={'margin': '50px'}, style={'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid', 'padding': '5px'}) ], style={'margin': '50px'}) class LogisticRegressionDashboard(Dashboard): def __init__(self, predict: PredictionDashboard): Dashboard.__init__(self) self.predict = predict self.coord_list = [] def get_layout(self): return self._generate_layout() def _generate_layout(self): metrics_list = [self._generate_matrix()] metrics_method = { 'model_quality': self._generate_quality(), 'signif': self._generate_signif(), 'resid': self._generate_resid(), } for metric in metrics_method: if metric in self.predict.settings['metrics']: metrics_list.append(metrics_method[metric]) # for metrics in self.predict.settings['metrics']: # metrics_list.append(metrics_method[metrics]) df_X = self.predict.df_X_test if np.any((df_X.data if issparse(df_X) else df_X) < 0): return html.Div([html.Div(html.H1(children='Логистическая регрессия'), style={'text-align': 'center'}), html.Div(dcc.Markdown(markdown_error), style={'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid', 'padding': '5px'})], style={'margin': '50px'}) else: return html.Div([ html.Div(html.H1(children='Логистическая регрессия'), style={'text-align': 'center'}), html.Div(metrics_list)]) def _generate_matrix(self): df_X = 
self.predict.df_X_test y_true = self.predict.df_Y_test y_pred = LogisticRegressionModel.predict(self.predict.model, df_X) TN, FP, FN, TP = sm.confusion_matrix(y_true, y_pred).ravel() df_matrix = pd.DataFrame(columns=['y_pred\y_true', 'True', 'False']) df_matrix.loc[1] = ['True', TP, FP] df_matrix.loc[2] = ['False', FN, TN] return html.Div([html.Div(html.H2(children='Матрица классификации'), style={'text-align': 'center'}), html.Div([html.Div(dash_table.DataTable( id='table_matrix', columns=[{"name": i, "id": i} for i in df_matrix.columns], data=df_matrix.to_dict('records'), tooltip={i: { 'value': i, 'use_with': 'both' } for i in df_matrix.columns}, export_format='csv', style_header={ 'textDecoration': 'underline', 'textDecorationStyle': 'dotted', }, ), style={'width': str(len(df_matrix.columns) * 8) + '%', 'display': 'inline-block'}), #html.Div(dcc.Markdown(markdown_linear_table1)) ], style={'width': '78%', 'display': 'inline-block', 'border-color': 'rgb(220, 220, 220)', 'border-style': 'solid', 'padding': '5px'})], style={'margin': '50px'}) # качество модели def _generate_quality(self): df_result_1 =
pd.DataFrame(columns=['Критерий', 'Хи-квадрат', 'Степень свободы', 'p-value'])
pandas.DataFrame
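The completion above creates an empty results table. A minimal sketch (hypothetical English column names and toy numbers; the dashboard itself uses Russian labels and real model statistics) of the same pandas.DataFrame pattern: declare the columns up front, fill rows with .loc, then hand the records to dash_table.DataTable.

import pandas as pd

df_result = pd.DataFrame(columns=['Criterion', 'Chi-square', 'Degrees of freedom', 'p-value'])
df_result.loc[1] = ['Likelihood ratio', 12.34, 3, 0.006]  # toy values, not real model output

# dash_table.DataTable expects its data argument as a list of per-row dicts:
records = df_result.to_dict('records')
print(records)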
import os import json import pandas as pd class RayResultsParser(): def __init__(self): pass def _load_run(self, path): result_file = os.path.join(path, "result.json") #result_file = path if not os.path.exists(result_file): return None with open(result_file,'r') as f: lines = f.readlines() if len(lines) > 0: return json.loads(lines[-1]) else: return None def _load_all_runs(self, path): runs = [r for r in os.listdir(path) if os.path.isdir(os.path.join(path,r))] result_list = list() for run in runs: runpath = os.path.join(path, run) run = self._load_run(runpath) if run is None: continue else: result = run config = result.pop("config") # merge result dict and config dict for key, value in config.items(): result[key] = value result_list.append(result) return result_list def _get_n_best_runs(self, experimentpath, n=5, group_by=["hidden_dims", "learning_rate", "num_rnn_layers"]): resultlist = self._load_all_runs(experimentpath) if len(resultlist) == 0: print("Warning! Experiment {} returned no runs".format(experimentpath)) return None result = pd.DataFrame(resultlist) # average accuracy over the same columns (particularily over the fold variable...) grouped = result.groupby(group_by)["accuracy"] nfolds = grouped.count().rename("nfolds") mean_accuracy = grouped.mean().rename("mean_accuracy") std_accuracy = grouped.std().rename("std_accuracy") score = pd.concat([mean_accuracy, std_accuracy, nfolds], axis=1) top = score.nlargest(n, "mean_accuracy") top["runs"] = len(score) dataset = os.path.basename(experimentpath) top.reset_index(inplace=True) top["dataset"] = dataset return top def get_sota_experiment(self, path, outpath=None, columns=["accuracy", "earliness"]): data = self._load_all_runs(path) print("{} runs returned!".format(len(data))) data = pd.DataFrame(data).set_index(["dataset"]) data.sort_values(by="accuracy", ascending=False).drop_duplicates( subset=['earliness_factor', 'entropy_factor', 'ptsepsilon'], keep='first') data[columns].to_csv(outpath) def get_best_hyperparameters(self, path, hyperparametercsv=None, group_by=["hidden_dims", "learning_rate", "num_rnn_layers"], n=1): experiments = os.listdir(path) best_hyperparams = list() for experiment in experiments: experimentpath = os.path.join(path,experiment) if os.path.isdir(experimentpath): print("parsing experiment "+experiment) result = self._get_n_best_runs(experimentpath=experimentpath, n=n, group_by=group_by) if result is not None: best_hyperparams.append(result) summary =
pd.concat(best_hyperparams)
pandas.concat
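The completion above stacks the per-experiment hyperparameter summaries into one table. A minimal sketch (toy frames, hypothetical dataset names) of that row-wise use of pandas.concat:

import pandas as pd

best_per_experiment = [
    pd.DataFrame({"dataset": ["datasetA"], "mean_accuracy": [0.91], "std_accuracy": [0.02]}),
    pd.DataFrame({"dataset": ["datasetB"], "mean_accuracy": [0.87], "std_accuracy": [0.03]}),
]

# The default axis=0 stacks rows; ignore_index=True renumbers them, whereas the
# parser above keeps each frame's original index labels.
summary = pd.concat(best_per_experiment, ignore_index=True)
print(summary)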
# coding:utf-8 """ 交易模拟器,用于研究单标的的买卖点变化过程 """ import sys sys.path.insert(0, "C:\git_repo\zengbin93\chan") import chan print(chan.__version__) import os import time import traceback import pandas as pd import tushare as ts from datetime import datetime, timedelta from chan import SolidAnalyze, KlineAnalyze from chan import plot_kline # 首次使用,需要在这里设置你的 tushare token,用于获取数据;在同一台机器上,tushare token 只需要设置一次 # 没有 token,到 https://tushare.pro/register?reg=7 注册获取 # ts.set_token("your tushare token") freq_map = {"1min": "1分钟", '5min': "5分钟", "30min": "30分钟", "D": "日线"} def is_trade_day(date): """判断date日期是不是交易日 :param date: str or datetime.date, 如 20180315 :return: Bool """ FILE_CALENDAR = "calendar.csv" if os.path.exists(FILE_CALENDAR) and \ time.time() - os.path.getmtime(FILE_CALENDAR) < 3600 * 24: trade_calendar = pd.read_csv(FILE_CALENDAR, encoding='utf-8', dtype={"cal_date": str}) else: pro = ts.pro_api() trade_calendar = pro.trade_cal() # tushare提供的交易日历 trade_calendar.to_csv(FILE_CALENDAR, index=False, encoding='utf-8') trade_day = trade_calendar[trade_calendar["is_open"] == 1] trade_day_list = [str(x).replace("-", "") for x in list(trade_day['cal_date'])] if isinstance(date, datetime): date = str(date.date()).replace("-", "") if date.replace("-", "") in trade_day_list: return True else: return False def _get_start_date(end_date, freq): end_date = datetime.strptime(end_date, '%Y%m%d') if freq == '1min': start_date = end_date - timedelta(days=70) elif freq == '5min': start_date = end_date - timedelta(days=150) elif freq == '30min': start_date = end_date - timedelta(days=1000) elif freq == 'D': start_date = end_date - timedelta(weeks=1000) elif freq == 'W': start_date = end_date - timedelta(weeks=1000) else: raise ValueError("'freq' value error, current value is %s, " "optional valid values are ['1min', '5min', '30min', " "'D', 'W']" % freq) return start_date def get_kline(ts_code, end_date, start_date=None, freq='30min', asset='E'): """获取指定级别的前复权K线 :param ts_code: str 股票代码,如 600122.SH :param freq: str K线级别,可选值 [1min, 5min, 15min, 30min, 60min, D, M, Y] :param end_date: str 日期,如 20190610 :param start_date: :param asset: str 交易资产类型,可选值 E股票 I沪深指数 C数字货币 FT期货 FD基金 O期权 CB可转债(v1.2.39),默认E :return: pd.DataFrame columns = ["symbol", "dt", "open", "close", "high", "low", "vol"] """ if not start_date: start_date = _get_start_date(end_date, freq) start_date = start_date.date().__str__().replace("-", "") if freq.endswith('min'): end_date = datetime.strptime(end_date, '%Y%m%d') end_date = end_date + timedelta(days=1) end_date = end_date.date().__str__().replace("-", "") df = ts.pro_bar(ts_code=ts_code, freq=freq, start_date=start_date, end_date=end_date, adj='qfq', asset=asset) # 统一 k 线数据格式为 6 列,分别是 ["symbol", "dt", "open", "close", "high", "low", "vr"] if "min" in freq: df.rename(columns={'ts_code': "symbol", "trade_time": "dt"}, inplace=True) else: df.rename(columns={'ts_code': "symbol", "trade_date": "dt"}, inplace=True) df.drop_duplicates(subset='dt', keep='first', inplace=True) df.sort_values('dt', inplace=True) df['dt'] = df.dt.apply(str) if freq.endswith("min"): # 清理 9:30 的空数据 df['not_start'] = df.dt.apply(lambda x: not x.endswith("09:30:00")) df = df[df['not_start']] df.reset_index(drop=True, inplace=True) if freq == 'D': df['dt'] = df['dt'].apply(lambda x: datetime.strptime(x, "%Y%m%d").strftime("%Y-%m-%d %H:%M:%S")) k = df[['symbol', 'dt', 'open', 'close', 'high', 'low', 'vol']] for col in ['open', 'close', 'high', 'low']: k[col] = k[col].apply(round, args=(2,)) return k def get_klines(ts_code, 
end_date, freqs='1min,5min,30min,D', asset='E'): """获取不同级别K线""" klines = dict() freqs = freqs.split(",") for freq in freqs: df = get_kline(ts_code, end_date, freq=freq, asset=asset) klines[freq_map[freq]] = df return klines def make_klines(k1): """从1分钟K线构造多级别K线 :param k1: pd.DataFrame 1分钟K线,输入的1分钟K线必须是交易日当天的全部1分钟K线,如果是实时行情,则是截止到交易时间的全部K线 :return: """ first_dt = k1.iloc[0]['dt'] kd = pd.DataFrame([{ 'symbol': k1.iloc[0]['symbol'], 'dt': first_dt.split(" ")[0] + " 00:00:00", 'open': k1.iloc[0]['open'], 'close': k1.iloc[-1]['close'], 'high': max(k1.high), 'low': min(k1.low), 'vol': round(sum(k1.vol) / 100, 2) }]) if first_dt.endswith("09:30:00"): k1 = k1.iloc[1:] cols = ['symbol', 'dt', 'open', 'close', 'high', 'low', 'vol'] def _minute_kline(freq): p = {'5min': 5, '15min': 15, '30min': 30, '60min': 60} d = p[freq] k2 = [] i = 0 while i * d < len(k1): df = k1.iloc[i * d: (i + 1) * d] symbol = df.iloc[0]['symbol'] dt = df.iloc[-1]['dt'] open_ = df.iloc[0]['open'] close_ = df.iloc[-1]['close'] high_ = max(df.high) low_ = min(df.low) vol_ = sum(df.vol) k = {"symbol": symbol, "dt": dt, "open": open_, "close": close_, "high": high_, "low": low_, "vol": vol_} k2.append(k) i += 1 k2 = pd.DataFrame(k2) return k2[cols] klines = {"1分钟": k1, '5分钟': _minute_kline('5min'), '30分钟': _minute_kline('30min'), "日线": kd[cols]} return klines def kline_simulator(ts_code, trade_dt, asset="E", count=5000): """K线模拟器(精确到分钟),每次模拟一天 >>> ks = kline_simulator(ts_code="300803.SZ", trade_dt='20200310') >>> for k in ks.__iter__(): >>> print(k['5分钟'].tail(2)) """ if "-" in trade_dt: dt1 = datetime.strptime(trade_dt, '%Y-%m-%d') else: dt1 = datetime.strptime(trade_dt, '%Y%m%d') last_date = dt1 - timedelta(days=1) init_klines = get_klines(ts_code, last_date.strftime("%Y%m%d"), freqs='1min,5min,30min,D', asset=asset) k1 = get_kline(ts_code, end_date=dt1.strftime("%Y%m%d"), start_date=dt1.strftime("%Y%m%d"), freq='1min', asset=asset) if k1.iloc[0]['dt'].endswith("09:30:00"): k1 = k1.iloc[1:] for i in range(1, len(k1)+1): k1_ = k1.iloc[:i] klines = make_klines(k1_) # 合并成新K线 new_klines = dict() for freq in init_klines.keys(): new_klines[freq] = pd.concat([init_klines[freq], klines[freq]]).tail(count) yield new_klines def trade_simulator(ts_code, end_date, file_bs, start_date=None, days=3, asset="E", watch_interval=5): """单只标的类实盘模拟,研究买卖点变化过程 :param file_bs: :param ts_code: str 标的代码,如 300033.SZ :param end_date: str 截止日期,如 20200312 :param start_date: str 开始日期 :param days: int 从截止日线向前推的天数 :param asset: str tushare 中的资产类型编码 :param watch_interval: int 看盘间隔,单位:分钟;默认值为 5分钟看盘一次 :return: None """ end_date = datetime.strptime(end_date.replace("-", ""), "%Y%m%d") if not start_date: start_date = end_date - timedelta(days=days) else: start_date = datetime.strptime(start_date.replace("-", ""), "%Y%m%d") results = [] while start_date <= end_date: if (asset in ["E", "I"]) and (not is_trade_day(start_date.strftime('%Y%m%d'))): start_date += timedelta(days=1) continue ks = kline_simulator(ts_code, trade_dt=start_date.strftime('%Y%m%d'), asset=asset) for i, klines in enumerate(ks.__iter__(), 1): latest_dt = klines['1分钟'].iloc[-1]['dt'] latest_price = klines['1分钟'].iloc[-1]['close'] if i % watch_interval != 0: continue print(latest_dt) sa = SolidAnalyze(klines) for func in [sa.is_first_buy, sa.is_second_buy, sa.is_third_buy, sa.is_xd_buy, sa.is_first_sell, sa.is_second_sell, sa.is_third_sell, sa.is_xd_sell]: for freq in ['1分钟', '5分钟', '30分钟']: try: b, detail = func(freq, tolerance=0.1) if b: detail['交易时间'] = latest_dt detail['交易价格'] = latest_price 
detail['交易级别'] = freq print(detail) results.append(detail) except: traceback.print_exc() continue df = pd.DataFrame(results) df.sort_values('交易时间', inplace=True) df = df.drop_duplicates(['出现时间', '基准价格', '操作提示', '标的代码']) cols = ['标的代码', '操作提示', '交易时间', '交易价格', '交易级别', '出现时间', '基准价格', '其他信息'] df = df[cols] df.to_excel(file_bs, index=False) start_date += timedelta(days=1) def check_trade(ts_code, file_bs, freq, end_date="20200314", asset="E", file_html="bs.html"): """在图上画出买卖点""" bs =
pd.read_excel(file_bs)
pandas.read_excel
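The completion above reloads the buy/sell-point table that trade_simulator wrote with to_excel. A minimal round-trip sketch (toy signal row, hypothetical file name; .xlsx reading and writing requires the optional openpyxl dependency):

import pandas as pd

signals = pd.DataFrame({
    "标的代码": ["300803.SZ"],           # toy values only, not real simulator output
    "操作提示": ["三买"],
    "交易时间": ["2020-03-10 10:30:00"],
    "交易价格": [35.2],
})
signals.to_excel("bs_demo.xlsx", index=False)

bs = pd.read_excel("bs_demo.xlsx")
print(bs.head())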