Dataset schema (column: type, range of values or lengths):
- author: int64, 658 to 755k
- date: string, length 19
- timezone: int64, -46,800 to 43.2k
- hash: string, length 40
- message: string, length 5 to 490
- mods: list
- language: string (categorical), 20 distinct values
- license: string (categorical), 3 distinct values
- repo: string, length 5 to 68
- original_message: string, length 12 to 491
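Each record below is one commit from this dataset, with the file-level changes stored in `mods`. As a minimal sketch of how such a record can be consumed (the row literal is abridged and hypothetical, and `mods` is assumed to arrive as a JSON-encoded string, although some loaders may already return a parsed list):

```python
import json

# Hypothetical, abridged row mirroring the fields in the schema above.
row = {
    "author": 641014,
    "date": "05.08.2020 09:42:03",
    "timezone": -32400,
    "hash": "d03938dfbab4301c0a302df6ae8418927049b8f5",
    "message": "add types.py",
    "mods": '[{"change_type": "ADD", "old_path": null, '
            '"new_path": "obp/types.py", "diff": "+..."}]',
}

# `mods` may already be a list, depending on the loader.
mods = json.loads(row["mods"]) if isinstance(row["mods"], str) else row["mods"]
for mod in mods:
    # Each entry describes one file-level change in the commit.
    print(mod["change_type"], mod["old_path"], "->", mod["new_path"])
```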
author: 641,014
date: 05.08.2020 09:42:03
timezone: -32,400
hash: d03938dfbab4301c0a302df6ae8418927049b8f5
message: add types.py
mods:
[ { "change_type": "ADD", "old_path": null, "new_path": "obp/types.py", "diff": "+# Copyright (c) ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Types.\"\"\"\n+from typing import Union, Dict\n+import numpy as np\n+\n+from .policy import BaseContextFreePolicy, BaseContextualPolicy\n+\n+# dataset\n+BanditFeedback = Dict[str, Union[str, np.ndarray]]\n+\n+# policy\n+BanditPolicy = Union[BaseContextFreePolicy, BaseContextualPolicy]\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: add types.py
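The commit above adds `obp/types.py` with two aliases; `BanditFeedback` is simply a string-keyed dictionary of logged data. A minimal sketch of such a dictionary, with key names taken from the `obtain_batch_bandit_feedback` outputs that appear later in this history and sizes that are purely illustrative:

```python
import numpy as np

n_rounds, n_actions = 5, 3
rng = np.random.RandomState(12345)
# Keys used throughout obp's dataset classes; values here are synthetic.
bandit_feedback = dict(
    n_rounds=n_rounds,
    n_actions=n_actions,
    context=rng.normal(size=(n_rounds, 4)),        # user features
    action=rng.randint(n_actions, size=n_rounds),  # logged actions
    position=np.zeros(n_rounds, dtype=int),        # single-slot case
    reward=rng.binomial(1, 0.3, size=n_rounds),    # observed rewards
    pscore=np.full(n_rounds, 1.0 / n_actions),     # behavior-policy propensities
)
```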
author: 641,014
date: 14.08.2020 16:22:11
timezone: -32,400
hash: c0d091d9de8cc14d09ee8ba195e8130dd20ef950
message: rename dirs in examples
mods:
[ { "change_type": "RENAME", "old_path": "examples/obd/README.md", "new_path": "examples/examples_with_obd/README.md", "diff": "@@ -46,9 +46,9 @@ python evaluate_off_policy_estimators.py\\\n# random_state=12345\n# --------------------------------------------------\n# mean 95.0% CI (lower) 95.0% CI (upper)\n-# dm 0.213823 0.14168 0.27770\n-# ipw 1.158730 0.96190 1.53333\n-# dr 1.105379 0.90189 1.42545\n+# dm 0.213823 0.141678 0.277700\n+# ipw 1.158730 0.961905 1.533333\n+# dr 1.105379 0.901894 1.425447\n# ==================================================\n```\n" }, { "change_type": "RENAME", "old_path": "examples/obd/conf/batch_size_bts.yaml", "new_path": "examples/examples_with_obd/conf/batch_size_bts.yaml", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/obd/conf/lightgbm.yaml", "new_path": "examples/examples_with_obd/conf/lightgbm.yaml", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/obd/conf/prior_bts.yaml", "new_path": "examples/examples_with_obd/conf/prior_bts.yaml", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/obd/custom_dataset.py", "new_path": "examples/examples_with_obd/custom_dataset.py", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/obd/evaluate_counterfactual_policy.py", "new_path": "examples/examples_with_obd/evaluate_counterfactual_policy.py", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/obd/evaluate_off_policy_estimators.py", "new_path": "examples/examples_with_obd/evaluate_off_policy_estimators.py", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/synthetic/README.md", "new_path": "examples/examples_with_synthetic/README.md", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/synthetic/evaluate_off_policy_estimators.py", "new_path": "examples/examples_with_synthetic/evaluate_off_policy_estimators.py", "diff": "" }, { "change_type": "MODIFY", "old_path": "examples/quickstart/quickstart.ipynb", "new_path": "examples/quickstart/quickstart.ipynb", "diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Qucik Start: Use Cases and Examples with OBD\\n\",\n+ \"# Quick Start: Use Cases and Examples with *Open Bandit Pipeline*\\n\",\n\"---\\n\",\n\"This notebook shows an example of conducting an offline evaluation of the performance of Bernoulli Thompson Sampling (BernoulliTS) as a counterfactual policy using OPE estimators and logged bandit feedback generated by the Random policy (behavior policy).\\n\",\n\"\\n\",\n" }, { "change_type": "MODIFY", "old_path": "examples/quickstart/quickstart_synthetic.ipynb", "new_path": "examples/quickstart/quickstart_synthetic.ipynb", "diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Qucik Start: Use Cases and Examples with Synthetic Data\\n\",\n+ \"# Quick Start: Use Cases and Examples with Synthetic Data\\n\",\n\"---\\n\",\n\"This notebook provides an example of conducting an offline evaluation of the performance of two counterfactual policies using OPE estimators and synthetic logged bandit feedback.\\n\",\n\"\\n\",\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: rename dirs in examples
author: 641,014
date: 17.08.2020 10:27:37
timezone: -32,400
hash: f06de3fc6fa624c36b3708b463de3b18e784ba5d
message: add assertion errors
mods:
[ { "change_type": "MODIFY", "old_path": "examples/examples_with_obd/README.md", "new_path": "examples/examples_with_obd/README.md", "diff": "@@ -5,12 +5,12 @@ We then evaluate the performances of some contextual bandit policies by using OP\n## Descriptions\n-- `conf/`\n- - [`./conf/batch_size_bts.yaml`]:\n+- [`conf/`](https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_obd/conf)\n+ - `batch_size_bts.yaml`:\nThe batch sizes used in the Bernoulli Thompson Sampling policy when running it on the ZOZOTOWN platform\n- - [`./conf/prior_bts.yaml`]\n+ - `prior_bts.yaml`:\nThe prior hyperparameters used in the Bernoulli Thompson Sampling policy when running it on the ZOZOTOWN platform\n- - [`./conf/lightgbm.yaml`]\n+ - `lightgbm.yaml`:\nThe hyperparameters of the LightGBM model that is used as the regression model in model dependent OPE estimators such as DM and DR\n- [`custom_dataset.py`](./custom_dataset.py):\n@@ -60,7 +60,7 @@ python evaluate_off_policy_estimators.py\\\n# ==================================================\n```\n-Please visit [Examples with Synthetic Data](https://github.com/st-tech/zr-obp/tree/master/examples/synthetic) to try the evaluation of OPE estimators with a larger dataset.\n+Please visit [Examples with Synthetic Data](https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_synthetic) to try the evaluation of OPE estimators with a larger dataset.\n**Evaluating Counterfactual Bandit Policy**\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -30,10 +30,10 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nn_actions: int\nNumber of actions.\n- dim_context: int\n+ dim_context: int, default: 1\nNumber of dimensions of context vectors.\n- dim_action_context: int\n+ dim_action_context: int, default: 1\nNumber of dimensions of context vectors for each action.\nreward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default: None\n@@ -46,7 +46,7 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nFunction generating probability distribution over action space,\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\nIf None is set, context **independent** probability of choosing each action will be\n- sampled from the uniform distribution automatically (context-free behavior policy).\n+ sampled from the dirichlet distribution automatically (context-free behavior policy).\nrandom_state: int, default: None\nControls the random seed in sampling synthetic bandit dataset.\n@@ -108,8 +108,8 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\n\"\"\"\nn_actions: int\n- dim_context: int\n- dim_action_context: int\n+ dim_context: int = 1\n+ dim_action_context: int = 1\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n@@ -119,32 +119,40 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n+ assert self.n_actions > 1 and isinstance(\n+ self.n_actions, int\n+ ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ assert self.dim_context > 0 and isinstance(\n+ self.dim_context, int\n+ ), f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n+ assert self.dim_action_context > 0 and isinstance(\n+ self.dim_action_context, int\n+ ), f\"dim_action_context must be a positive integer, but 
{self.dim_action_context} is given\"\n+\nself.random_ = check_random_state(self.random_state)\n- self.sample_action_context()\nif self.reward_function is None:\n- self.sample_contextfree_expected_reward()\n+ self.expected_reward = self.sample_contextfree_expected_reward()\nif self.behavior_policy_function is None:\n- self.sample_contextfree_behavior_policy()\n+ self.behavior_policy = self.sample_contextfree_behavior_policy()\n+ self.action_context = self.sample_action_context()\n@property\ndef len_list(self) -> int:\n\"\"\"Length of recommendation lists.\"\"\"\nreturn 1\n- def sample_action_context(self) -> None:\n+ def sample_action_context(self) -> np.ndarray:\n\"\"\"Sample action context vectors from the standard normal distribution.\"\"\"\n- self.action_context = self.random_.normal(\n- size=(self.n_actions, self.dim_action_context)\n- )\n+ return self.random_.normal(size=(self.n_actions, self.dim_action_context))\ndef sample_contextfree_expected_reward(self) -> np.ndarray:\n\"\"\"Sample expected reward for each action from the uniform distribution.\"\"\"\n- self.expected_reward = self.random_.uniform(size=self.n_actions)\n+ return self.random_.uniform(size=self.n_actions)\ndef sample_contextfree_behavior_policy(self) -> np.ndarray:\n\"\"\"Sample probability of choosing each action from the dirichlet distribution.\"\"\"\nalpha = self.random_.uniform(size=self.n_actions)\n- self.behavior_policy = self.random_.dirichlet(alpha=alpha)\n+ return self.random_.dirichlet(alpha=alpha)\ndef obtain_batch_bandit_feedback(self, n_rounds: int) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -201,6 +209,7 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nn_rounds=n_rounds,\nn_actions=self.n_actions,\ncontext=context,\n+ action_context=self.action_context,\naction=action,\nposition=np.zeros(n_rounds, dtype=int),\nreward=reward,\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: add assertion errors
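Besides the input assertions, the commit above switches the context-free behavior policy from a uniform draw to a Dirichlet draw and makes the samplers return values instead of setting attributes. A small sketch that mirrors that sampling logic (numbers are arbitrary):

```python
import numpy as np
from sklearn.utils import check_random_state

n_actions = 5
random_ = check_random_state(12345)

# Context-free expected reward per action, sampled from a uniform distribution.
expected_reward = random_.uniform(size=n_actions)
# Context-free behavior policy: Dirichlet draw, so probabilities sum to one.
alpha = random_.uniform(size=n_actions)
behavior_policy = random_.dirichlet(alpha=alpha)

assert np.isclose(behavior_policy.sum(), 1.0)
```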
author: 641,014
date: 24.08.2020 16:37:11
timezone: -32,400
hash: ccc7d5d574a41b23227512e4b69d9c0420e215ed
message: update docs and docstring
mods:
[ { "change_type": "MODIFY", "old_path": "docs/index.rst", "new_path": "docs/index.rst", "diff": "You can adapt this file completely to your liking, but it should at least\ncontain the root `toctree` directive.\n-Welcome to obp's documentation!\n-====================================\n-\n.. image:: ./_static/images/logo.png\n:scale: 20%\n:align: center\n-Open Bandit Dataset and Pipeline\n-=====================================\n+Open Bandit Pipeline; a python library for bandit algorithms and off-policy evaluation\n+=========================================================================================\nOverview\n~~~~~~~~~~~~\n-*Open Bandit Pipeline (OBP)* is a Python 3.7+ toolkit for bandit algorithms and off-policy evaluation (OPE).\n+*Open Bandit Pipeline (OBP)* is an open source end-to-end python library for bandit algorithms and off-policy evaluation (OPE).\nThe toolkit comes with the *Open Bandit Dataset* , a large-scale logged bandit feedback data collected on a fashion e-commerce platform, `ZOZOTOWN <https://corp.zozo.com/en/service/>`_.\nThe purpose of the open data and library is to enable easy, realistic, and reproducible evaluation of bandit algorithms and OPE.\nOBP has a series of implementations of dataset preprocessing, bandit policy interfaces, offline bandit simulator, and standard OPE estimators.\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "@@ -24,7 +24,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\nUsers are free to implement their own feature engineering by overriding `pre_process` method.\nParameters\n- ----------\n+ -----------\nbehavior_policy: str\nName of the behavior policy that generated the log data.\nMust be 'random' or 'bts'.\n@@ -38,6 +38,11 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndataset_name: str, default: 'obd'\nName of the dataset.\n+ References\n+ ------------\n+ Yuta Saito, Shunsuke Aihara, Megumi Matsutani, Yusuke Narita.\n+ \"A Large-scale Open Dataset for Bandit Algorithms.\", 2020.\n+\n\"\"\"\nbehavior_policy: str\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: update docs and docstring
author: 641,014
date: 25.08.2020 12:24:09
timezone: -32,400
hash: 34f47f4f0da40aa8bcb9f556e608c42969568274
message: fix error in regression model
mods:
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -100,7 +100,7 @@ class OffPolicyEvaluation:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\nif self.regression_model is not None:\n- if check_is_fitted(self.regression_model):\n+ if check_is_fitted(self.regression_model.base_model):\nlogger.info(\"a fitted regression model is given.\")\nelse:\nlogger.info(\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: fix error in regression model
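The one-line fix above points the fitted-model check at the wrapped scikit-learn estimator rather than at the `RegressionModel` wrapper itself. The sketch below illustrates the idea with scikit-learn's own `check_is_fitted` (which raises `NotFittedError`) and a hypothetical stand-in wrapper, not obp's vendored helper:

```python
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_is_fitted


class RegressionModelWrapper:
    """Hypothetical stand-in for obp.ope.RegressionModel."""

    def __init__(self, base_model):
        self.base_model = base_model


reg_model = RegressionModelWrapper(base_model=LogisticRegression())

try:
    # Fitted attributes such as coef_ live on base_model, not on the wrapper,
    # so the check has to target reg_model.base_model.
    check_is_fitted(reg_model.base_model)
    print("base model is fitted")
except NotFittedError:
    print("base model is not fitted yet")
```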
author: 641,014
date: 25.08.2020 12:51:22
timezone: -32,400
hash: c717ec4df21a42b38090f6c2d07efaa8486364ab
message: add benchmarking experiments on OPE estimators
mods:
[ { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/README.md", "diff": "+# Benchmarking Off-Policy Estimators\n+\n+## Overview\n+\n+\n+## Experimental Settings\n+\n+\n+## Usage\n+\n+\n+## Benchmark Results\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/benchmark_off_policy_estimators.py", "diff": "+import argparse\n+import pickle\n+from pathlib import Path\n+import yaml\n+\n+import numpy as np\n+import pandas as pd\n+\n+from obp.dataset import OpenBanditDataset\n+from obp.simulator import run_bandit_simulation\n+from obp.policy import Random, BernoulliTS\n+from obp.ope import (\n+ OffPolicyEvaluation,\n+ InverseProbabilityWeighting,\n+ SelfNormalizedInverseProbabilityWeighting,\n+ DirectMethod,\n+ DoublyRobust,\n+ SwitchDoublyRobust,\n+)\n+from obp.utils import estimate_confidence_interval_by_bootstrap\n+\n+# configurations to reproduce the Bernoulli Thompson Sampling policy\n+# used in ZOZOTOWN production\n+with open(\"./conf/prior_bts.yaml\", \"rb\") as f:\n+ production_prior_for_bts = yaml.safe_load(f)\n+\n+with open(\"./conf/batch_size_bts.yaml\", \"rb\") as f:\n+ production_batch_size_for_bts = yaml.safe_load(f)\n+\n+counterfactual_policy_dict = dict(bts=BernoulliTS, random=Random)\n+\n+if __name__ == \"__main__\":\n+ parser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\n+ parser.add_argument(\n+ \"--n_boot_samples\",\n+ type=int,\n+ default=1,\n+ help=\"number of bootstrap samples in the experiment.\",\n+ )\n+ parser.add_argument(\n+ \"--base_model\",\n+ type=str,\n+ choices=[\"logistic_regression\", \"lightgbm\"],\n+ required=True,\n+ help=\"base ML model for regression model, logistic_regression or lightgbm.\",\n+ )\n+ parser.add_argument(\n+ \"--counterfactual_policy\",\n+ type=str,\n+ choices=[\"bts\", \"random\"],\n+ required=True,\n+ help=\"counterfactual policy, bts or random.\",\n+ )\n+ parser.add_argument(\n+ \"--behavior_policy\",\n+ type=str,\n+ choices=[\"bts\", \"random\"],\n+ required=True,\n+ help=\"behavior policy, bts or random.\",\n+ )\n+ parser.add_argument(\n+ \"--campaign\",\n+ type=str,\n+ choices=[\"all\", \"men\", \"women\"],\n+ required=True,\n+ help=\"campaign name, men, women, or all.\",\n+ )\n+ parser.add_argument(\"--random_state\", type=int, default=12345)\n+ args = parser.parse_args()\n+ print(args)\n+\n+ # configurations of the benchmark experiment\n+ n_boot_samples = args.n_boot_samples\n+ base_model = args.base_model\n+ counterfactual_policy = args.counterfactual_policy\n+ behavior_policy = args.behavior_policy\n+ campaign = args.campaign\n+ random_state = args.random_state\n+ data_path = Path(\"../open_bandit_dataset\")\n+ # prepare path\n+ log_path = Path(\"./logs\") / behavior_policy / campaign / base_model\n+ reg_model_path = log_path / \"trained_reg_models\"\n+\n+ obd = OpenBanditDataset(\n+ behavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n+ )\n+ # hyparparameters for counterfactual policies\n+ kwargs = dict(\n+ n_actions=obd.n_actions, len_list=obd.len_list, random_state=random_state\n+ )\n+ if counterfactual_policy == \"bts\":\n+ kwargs[\"alpha\"] = production_prior_for_bts[campaign][\"alpha\"]\n+ kwargs[\"beta\"] = production_prior_for_bts[campaign][\"beta\"]\n+ kwargs[\"batch_size\"] = production_batch_size_for_bts[campaign]\n+ policy = counterfactual_policy_dict[counterfactual_policy](**kwargs)\n+ # compared OPE estimators\n+ ope_estimators = [\n+ DirectMethod(),\n+ InverseProbabilityWeighting(),\n+ 
SelfNormalizedInverseProbabilityWeighting(),\n+ DoublyRobust(),\n+ SwitchDoublyRobust(tau=1000),\n+ ]\n+ # ground-truth policy value of a counterfactual policy\n+ # , which is estimated with factual (observed) rewards (on-policy estimation)\n+ ground_truth_policy_value = OpenBanditDataset.calc_on_policy_policy_value_estimate(\n+ behavior_policy=counterfactual_policy, campaign=campaign, data_path=data_path\n+ )\n+\n+ evaluation_of_ope_results = {\n+ est.estimator_name: np.zeros(n_boot_samples) for est in ope_estimators\n+ }\n+ for b in np.arange(n_boot_samples):\n+ # load the pre-trained regression model\n+ with open(reg_model_path / f\"reg_model_{b}.pkl\", \"rb\") as f:\n+ reg_model = pickle.load(f)\n+ # sample bootstrap from batch logged bandit feedback\n+ boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(random_state=b)\n+ # run a counterfactual bandit algorithm on logged bandit feedback data\n+ selected_actions = run_bandit_simulation(\n+ bandit_feedback=boot_bandit_feedback, policy=policy\n+ )\n+ # evaluate the estimation performance of OPE estimators\n+ ope = OffPolicyEvaluation(\n+ bandit_feedback=boot_bandit_feedback,\n+ action_context=obd.action_context,\n+ regression_model=reg_model,\n+ ope_estimators=ope_estimators,\n+ )\n+ relative_estimation_errors = ope.evaluate_performance_of_estimators(\n+ selected_actions=selected_actions,\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ )\n+ policy.initialize()\n+ # store relative estimation errors of OPE estimators at each split\n+ for (\n+ estimator_name,\n+ relative_estimation_error,\n+ ) in relative_estimation_errors.items():\n+ evaluation_of_ope_results[estimator_name][b] = relative_estimation_error\n+\n+ # estimate confidence intervals of relative estimation by nonparametric bootstrap method\n+ evaluation_of_ope_results_with_ci = {\n+ est.estimator_name: dict() for est in ope_estimators\n+ }\n+ for estimator_name in evaluation_of_ope_results_with_ci.keys():\n+ evaluation_of_ope_results_with_ci[\n+ estimator_name\n+ ] = estimate_confidence_interval_by_bootstrap(\n+ samples=evaluation_of_ope_results[estimator_name], random_state=random_state\n+ )\n+ evaluation_of_ope_results_df = pd.DataFrame(evaluation_of_ope_results_with_ci).T\n+\n+ print(\"=\" * 50)\n+ print(f\"random_state={random_state}\")\n+ print(\"-\" * 50)\n+ print(evaluation_of_ope_results_df)\n+ print(\"=\" * 50)\n+\n+ # save results of the evaluation of off-policy estimators in './logs' directory.\n+ evaluation_of_ope_results_df.to_csv(log_path / f\"comparison_of_ope_estimators.csv\")\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/conf/batch_size_bts.yaml", "diff": "+all: 1800\n+men: 3200\n+women: 4850\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/conf/hyperparam.yaml", "diff": "+lightgbm:\n+ max_iter: 1000\n+ learning_rate: 0.03\n+ min_samples_leaf: 5\n+ random_state: 12345\n+logistic_regression:\n+ max_iter: 10000\n+ C: 1000\n+ random_state: 12345\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/conf/prior_bts.yaml", "diff": "+all:\n+ alpha:\n+ - 47.0\n+ - 8.0\n+ - 62.0\n+ - 142.0\n+ - 3.0\n+ - 14.0\n+ - 7.0\n+ - 857.0\n+ - 12.0\n+ - 15.0\n+ - 6.0\n+ - 100.0\n+ - 48.0\n+ - 23.0\n+ - 71.0\n+ - 61.0\n+ - 13.0\n+ - 16.0\n+ - 518.0\n+ - 30.0\n+ - 7.0\n+ - 4.0\n+ - 23.0\n+ - 8.0\n+ - 10.0\n+ - 11.0\n+ - 11.0\n+ - 18.0\n+ - 121.0\n+ - 11.0\n+ - 11.0\n+ - 10.0\n+ - 14.0\n+ - 9.0\n+ - 204.0\n+ - 58.0\n+ - 3.0\n+ - 19.0\n+ - 42.0\n+ - 1013.0\n+ - 2.0\n+ - 328.0\n+ - 15.0\n+ - 
31.0\n+ - 14.0\n+ - 138.0\n+ - 45.0\n+ - 55.0\n+ - 23.0\n+ - 38.0\n+ - 10.0\n+ - 401.0\n+ - 52.0\n+ - 6.0\n+ - 3.0\n+ - 6.0\n+ - 5.0\n+ - 32.0\n+ - 35.0\n+ - 133.0\n+ - 52.0\n+ - 820.0\n+ - 43.0\n+ - 195.0\n+ - 8.0\n+ - 42.0\n+ - 40.0\n+ - 4.0\n+ - 32.0\n+ - 30.0\n+ - 9.0\n+ - 22.0\n+ - 6.0\n+ - 23.0\n+ - 5.0\n+ - 54.0\n+ - 8.0\n+ - 22.0\n+ - 65.0\n+ - 246.0\n+ beta:\n+ - 12198.0\n+ - 3566.0\n+ - 15993.0\n+ - 35522.0\n+ - 2367.0\n+ - 4609.0\n+ - 3171.0\n+ - 181745.0\n+ - 4372.0\n+ - 4951.0\n+ - 3100.0\n+ - 24665.0\n+ - 13210.0\n+ - 7061.0\n+ - 18061.0\n+ - 17449.0\n+ - 5644.0\n+ - 6787.0\n+ - 111326.0\n+ - 8776.0\n+ - 3334.0\n+ - 2271.0\n+ - 7389.0\n+ - 2659.0\n+ - 3665.0\n+ - 4724.0\n+ - 3561.0\n+ - 5085.0\n+ - 27407.0\n+ - 4601.0\n+ - 4756.0\n+ - 4120.0\n+ - 4736.0\n+ - 3788.0\n+ - 45292.0\n+ - 14719.0\n+ - 2189.0\n+ - 5589.0\n+ - 11995.0\n+ - 222255.0\n+ - 2308.0\n+ - 70034.0\n+ - 4801.0\n+ - 8274.0\n+ - 5421.0\n+ - 31912.0\n+ - 12213.0\n+ - 13576.0\n+ - 6230.0\n+ - 10382.0\n+ - 4141.0\n+ - 85731.0\n+ - 12811.0\n+ - 2707.0\n+ - 2250.0\n+ - 2668.0\n+ - 2886.0\n+ - 9581.0\n+ - 9465.0\n+ - 28336.0\n+ - 12062.0\n+ - 162793.0\n+ - 12107.0\n+ - 41240.0\n+ - 3162.0\n+ - 11604.0\n+ - 10818.0\n+ - 2923.0\n+ - 8897.0\n+ - 8654.0\n+ - 4000.0\n+ - 6580.0\n+ - 3174.0\n+ - 6766.0\n+ - 2602.0\n+ - 14506.0\n+ - 3968.0\n+ - 7523.0\n+ - 16532.0\n+ - 51964.0\n+men:\n+ alpha:\n+ - 47.0\n+ - 8.0\n+ - 62.0\n+ - 142.0\n+ - 3.0\n+ - 6.0\n+ - 100.0\n+ - 48.0\n+ - 23.0\n+ - 71.0\n+ - 61.0\n+ - 13.0\n+ - 16.0\n+ - 518.0\n+ - 30.0\n+ - 7.0\n+ - 4.0\n+ - 23.0\n+ - 8.0\n+ - 10.0\n+ - 11.0\n+ - 11.0\n+ - 18.0\n+ - 121.0\n+ - 11.0\n+ - 4.0\n+ - 32.0\n+ - 30.0\n+ - 9.0\n+ - 22.0\n+ - 6.0\n+ - 23.0\n+ - 5.0\n+ - 54.0\n+ beta:\n+ - 12198.0\n+ - 3566.0\n+ - 15993.0\n+ - 35522.0\n+ - 2367.0\n+ - 3100.0\n+ - 24665.0\n+ - 13210.0\n+ - 7061.0\n+ - 18061.0\n+ - 17449.0\n+ - 5644.0\n+ - 6787.0\n+ - 111326.0\n+ - 8776.0\n+ - 3334.0\n+ - 2271.0\n+ - 7389.0\n+ - 2659.0\n+ - 3665.0\n+ - 4724.0\n+ - 3561.0\n+ - 5085.0\n+ - 27407.0\n+ - 4601.0\n+ - 2923.0\n+ - 8897.0\n+ - 8654.0\n+ - 4000.0\n+ - 6580.0\n+ - 3174.0\n+ - 6766.0\n+ - 2602.0\n+ - 14506.0\n+women:\n+ alpha:\n+ - 12.0\n+ - 7.0\n+ - 984.0\n+ - 13.0\n+ - 15.0\n+ - 15.0\n+ - 11.0\n+ - 14.0\n+ - 9.0\n+ - 200.0\n+ - 72.0\n+ - 3.0\n+ - 14.0\n+ - 49.0\n+ - 1278.0\n+ - 3.0\n+ - 325.0\n+ - 14.0\n+ - 27.0\n+ - 14.0\n+ - 169.0\n+ - 48.0\n+ - 47.0\n+ - 18.0\n+ - 40.0\n+ - 12.0\n+ - 447.0\n+ - 46.0\n+ - 5.0\n+ - 3.0\n+ - 5.0\n+ - 7.0\n+ - 35.0\n+ - 34.0\n+ - 99.0\n+ - 30.0\n+ - 880.0\n+ - 51.0\n+ - 182.0\n+ - 6.0\n+ - 45.0\n+ - 39.0\n+ - 10.0\n+ - 24.0\n+ - 72.0\n+ - 229.0\n+ beta:\n+ - 3612.0\n+ - 3173.0\n+ - 204484.0\n+ - 4517.0\n+ - 4765.0\n+ - 5331.0\n+ - 4131.0\n+ - 4728.0\n+ - 4028.0\n+ - 44280.0\n+ - 17918.0\n+ - 2309.0\n+ - 4339.0\n+ - 12922.0\n+ - 270771.0\n+ - 2480.0\n+ - 68475.0\n+ - 5129.0\n+ - 7367.0\n+ - 5819.0\n+ - 38026.0\n+ - 13047.0\n+ - 11604.0\n+ - 5394.0\n+ - 10912.0\n+ - 4439.0\n+ - 94485.0\n+ - 10700.0\n+ - 2679.0\n+ - 2319.0\n+ - 2578.0\n+ - 3288.0\n+ - 9566.0\n+ - 9775.0\n+ - 20120.0\n+ - 7317.0\n+ - 172026.0\n+ - 13673.0\n+ - 37329.0\n+ - 3365.0\n+ - 10911.0\n+ - 10734.0\n+ - 4278.0\n+ - 7574.0\n+ - 16826.0\n+ - 47462.0\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/train_regression_model.py", "diff": "+import time\n+import argparse\n+from pathlib import Path\n+import yaml\n+import pickle\n+\n+import numpy as np\n+import pandas as pd\n+from sklearn.calibration import CalibratedClassifierCV\n+from sklearn.experimental import 
enable_hist_gradient_boosting\n+from sklearn.ensemble import HistGradientBoostingClassifier\n+from sklearn.linear_model import LogisticRegression\n+from sklearn.metrics import log_loss, roc_auc_score\n+\n+from obp.dataset import OpenBanditDataset\n+from obp.ope import RegressionModel\n+from obp.utils import estimate_confidence_interval_by_bootstrap\n+\n+\n+with open(\"./conf/hyperparam.yaml\", \"rb\") as f:\n+ hyperparams = yaml.safe_load(f)\n+\n+base_model_dict = dict(\n+ logistic_regression=LogisticRegression, lightgbm=HistGradientBoostingClassifier\n+)\n+\n+metrics = [\"auc\", \"rce\"]\n+\n+if __name__ == \"__main__\":\n+ parser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\n+ parser.add_argument(\n+ \"--n_boot_samples\",\n+ type=int,\n+ default=1,\n+ help=\"number of bootstrap samples in the experiment.\",\n+ )\n+ parser.add_argument(\n+ \"--base_model\",\n+ type=str,\n+ choices=[\"logistic_regression\", \"lightgbm\"],\n+ required=True,\n+ help=\"base ML model for regression model, logistic_regression or lightgbm.\",\n+ )\n+ parser.add_argument(\n+ \"--behavior_policy\",\n+ type=str,\n+ choices=[\"bts\", \"random\"],\n+ required=True,\n+ help=\"behavior policy, bts or random.\",\n+ )\n+ parser.add_argument(\n+ \"--campaign\",\n+ type=str,\n+ choices=[\"all\", \"men\", \"women\"],\n+ required=True,\n+ help=\"campaign name, men, women, or all.\",\n+ )\n+ parser.add_argument(\"--random_state\", type=int, default=12345)\n+ args = parser.parse_args()\n+ print(args)\n+\n+ # configurations of the benchmark experiment\n+ n_boot_samples = args.n_boot_samples\n+ base_model = args.base_model\n+ behavior_policy = args.behavior_policy\n+ campaign = args.campaign\n+ random_state = args.random_state\n+ data_path = Path(\"../open_bandit_dataset\")\n+ # prepare path\n+ log_path = Path(\"./logs\") / behavior_policy / campaign / base_model\n+ reg_model_path = log_path / \"trained_reg_models\"\n+ reg_model_path.mkdir(exist_ok=True, parents=True)\n+\n+ obd = OpenBanditDataset(\n+ behavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n+ )\n+ # a base ML model for regression model\n+ reg_model = RegressionModel(\n+ base_model=CalibratedClassifierCV(\n+ base_model_dict[base_model](**hyperparams[base_model])\n+ )\n+ )\n+\n+ start_time = time.time()\n+ performance_of_reg_model = {\n+ metrics[i]: np.zeros(n_boot_samples) for i in np.arange(len(metrics))\n+ }\n+ for b in np.arange(n_boot_samples):\n+ # sample bootstrap from batch logged bandit feedback\n+ boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(random_state=b)\n+ # train a regression model on logged bandit feedback data\n+ reg_model.fit(\n+ context=boot_bandit_feedback[\"context\"],\n+ action=boot_bandit_feedback[\"action\"],\n+ reward=boot_bandit_feedback[\"reward\"],\n+ pscore=boot_bandit_feedback[\"pscore\"],\n+ action_context=boot_bandit_feedback[\"action_context\"],\n+ )\n+ # evaluate the (in-sample) estimation performance of the regression model by AUC and RCE\n+ predicted_rewards = reg_model.predict(\n+ context=boot_bandit_feedback[\"context\"],\n+ action_context=boot_bandit_feedback[\"action_context\"],\n+ selected_actions=np.expand_dims(boot_bandit_feedback[\"action\"], 1),\n+ position=np.zeros(boot_bandit_feedback[\"n_rounds\"], dtype=int),\n+ )\n+ rewards = boot_bandit_feedback[\"reward\"]\n+ performance_of_reg_model[\"auc\"][b] = roc_auc_score(\n+ y_true=rewards, y_score=predicted_rewards\n+ )\n+ rce_mean = log_loss(\n+ y_true=rewards, y_pred=np.ones_like(rewards) * np.mean(rewards)\n+ 
)\n+ rce_clf = log_loss(y_true=rewards, y_pred=predicted_rewards)\n+ performance_of_reg_model[\"rce\"][b] = (rce_mean - rce_clf) / rce_clf\n+\n+ # save trained regression model in a pickled form\n+ pickle.dump(\n+ reg_model, open(reg_model_path / f\"reg_model_{b}.pkl\", \"wb\"),\n+ )\n+ print(\n+ f\"Finished {b+1}th bootstrap sample:\",\n+ f\"{np.round((time.time() - start_time) / 60, 1)}min\",\n+ )\n+\n+ # estimate confidence intervals of the performances of the regression model\n+ performance_of_reg_model_with_ci = {}\n+ for metric in metrics:\n+ performance_of_reg_model_with_ci[\n+ metric\n+ ] = estimate_confidence_interval_by_bootstrap(\n+ samples=performance_of_reg_model[metric], random_state=random_state\n+ )\n+ performance_of_reg_model_df = pd.DataFrame(performance_of_reg_model_with_ci).T\n+\n+ print(\"=\" * 50)\n+ print(f\"random_state={random_state}\")\n+ print(\"-\" * 50)\n+ print(performance_of_reg_model_df)\n+ print(\"=\" * 50)\n+\n+ # save performance of the regression model in './logs' directory.\n+ performance_of_reg_model_df.to_csv(log_path / f\"performance_of_reg_model.csv\")\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: add benchmarking experiments on OPE estimators
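The benchmark script above aggregates per-bootstrap relative estimation errors with `estimate_confidence_interval_by_bootstrap`. As a rough, illustrative sketch of that kind of nonparametric percentile-bootstrap confidence interval (obp's own helper may differ in details such as the number of resamples):

```python
import numpy as np


def bootstrap_ci(samples: np.ndarray, n_resamples: int = 10000,
                 random_state: int = 12345) -> dict:
    """Nonparametric 95% bootstrap confidence interval of the mean of `samples`."""
    random_ = np.random.RandomState(random_state)
    boot_means = np.array([
        random_.choice(samples, size=samples.size, replace=True).mean()
        for _ in np.arange(n_resamples)
    ])
    return {
        "mean": samples.mean(),
        "95.0% CI (lower)": np.percentile(boot_means, 2.5),
        "95.0% CI (upper)": np.percentile(boot_means, 97.5),
    }


# e.g. relative estimation errors of one OPE estimator across bootstrap runs
print(bootstrap_ci(np.array([0.21, 0.25, 0.19, 0.23, 0.22])))
```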
author: 641,014
date: 02.09.2020 23:08:25
timezone: -32,400
hash: 09226ee49037361c1f119aa7154dd2eddeb42b31
message: add `convert_to_action_dist` func
mods:
[ { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -59,6 +59,34 @@ def estimate_confidence_interval_by_bootstrap(\n}\n+def convert_to_action_dist(n_actions: int, selected_actions: np.ndarray,) -> np.ndarray:\n+ \"\"\"Convert selected actions (output of `run_bandit_simulation`) to distribution over actions.\n+\n+ Parameters\n+ ----------\n+ n_actions: int\n+ Number of actions.\n+\n+ selected_actions: array-like, shape (n_rounds, len_list)\n+ Sequence of actions selected by evaluation policy\n+ at each round in offline bandit simulation.\n+\n+ Returns\n+ ----------\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position (can be deterministic).\n+\n+ \"\"\"\n+ n_rounds, len_list = selected_actions.shape\n+ action_dist = np.zeros((n_rounds, n_actions, len_list))\n+ for pos in np.arange(len_list):\n+ selected_actions_ = selected_actions[:, pos]\n+ action_dist[\n+ np.arange(n_rounds), selected_actions_, pos * np.ones(n_rounds, int),\n+ ] = 1\n+ return action_dist\n+\n+\n@_deprecate_positional_args\ndef check_is_fitted(\nestimator: BaseEstimator, attributes=None, *, msg: str = None, all_or_any=all\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: add `convert_to_action_dist` func
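The new helper turns the simulator's selected actions into a one-hot distribution over actions per position. A standalone copy of its logic (taken from the diff above) applied to a tiny input makes the output shape concrete:

```python
import numpy as np


def convert_to_action_dist(n_actions: int, selected_actions: np.ndarray) -> np.ndarray:
    """One-hot distribution of shape (n_rounds, n_actions, len_list)."""
    n_rounds, len_list = selected_actions.shape
    action_dist = np.zeros((n_rounds, n_actions, len_list))
    for pos in np.arange(len_list):
        selected_actions_ = selected_actions[:, pos]
        action_dist[
            np.arange(n_rounds), selected_actions_, pos * np.ones(n_rounds, int)
        ] = 1
    return action_dist


selected = np.array([[2], [0]])  # action chosen at the single position in 2 rounds
dist = convert_to_action_dist(n_actions=4, selected_actions=selected)
print(dist[:, :, 0])
# [[0. 0. 1. 0.]
#  [1. 0. 0. 0.]]
```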
author: 641,014
date: 02.09.2020 23:08:53
timezone: -32,400
hash: 0b42be36695e948657ce3ccf48e9f370cea7da44
message: modify the output of the offline bandit simulator
mods:
[ { "change_type": "MODIFY", "old_path": "obp/simulator/simulator.py", "new_path": "obp/simulator/simulator.py", "diff": "@@ -6,7 +6,7 @@ from tqdm import tqdm\nimport numpy as np\n-from ..utils import check_bandit_feedback_inputs\n+from ..utils import check_bandit_feedback_inputs, convert_to_action_dist\nfrom ..types import BanditFeedback, BanditPolicy\n@@ -21,13 +21,12 @@ def run_bandit_simulation(\nLogged bandit feedback data to be used in offline bandit simulation.\npolicy: BanditPolicy\n- Bandit policy to be evaluated in offline bandit simulation (i.e., counterfactual or evaluation policy).\n+ Bandit policy to be evaluated in offline bandit simulation (i.e., evaluation policy).\nReturns\n--------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of list of actions selected by counterfactual (or evaluation) policy\n- at each round in offline bandit simulation.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position (can be deterministic).\n\"\"\"\nfor key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\n@@ -73,5 +72,8 @@ def run_bandit_simulation(\n)\nselected_actions_list.append(selected_actions)\n- return np.array(selected_actions_list)\n-\n+ action_dist = convert_to_action_dist(\n+ n_actions=bandit_feedback[\"action\"].max() + 1,\n+ selected_actions=np.array(selected_actions_list),\n+ )\n+ return action_dist\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: modify the output of the offline bandit simulator
author: 641,014
date: 08.09.2020 13:14:12
timezone: -32,400
hash: 17a7417b7a569f7214ef18580c4c51b4a27d13f6
message: add time series split case to benchmark
mods:
[ { "change_type": "MODIFY", "old_path": "benchmark/ope/benchmark_off_policy_estimators.py", "new_path": "benchmark/ope/benchmark_off_policy_estimators.py", "diff": "@@ -65,6 +65,17 @@ if __name__ == \"__main__\":\nrequired=True,\nhelp=\"campaign name, men, women, or all.\",\n)\n+ parser.add_argument(\n+ \"--test_size\",\n+ type=float,\n+ default=0.3,\n+ help=\"the proportion of the dataset to include in the test split.\",\n+ )\n+ parser.add_argument(\n+ \"--is_timeseries_split\",\n+ action=\"store_true\",\n+ help=\"If true, split the original logged badnit feedback data by time series.\",\n+ )\nparser.add_argument(\"--random_state\", type=int, default=12345)\nargs = parser.parse_args()\nprint(args)\n@@ -75,11 +86,18 @@ if __name__ == \"__main__\":\ncounterfactual_policy = args.counterfactual_policy\nbehavior_policy = args.behavior_policy\ncampaign = args.campaign\n+ test_size = args.test_size\n+ is_timeseries_split = args.is_timeseries_split\nrandom_state = args.random_state\ndata_path = Path(\"../open_bandit_dataset\")\n# prepare path\nlog_path = Path(\"./logs\") / behavior_policy / campaign / base_model\n- reg_model_path = log_path / \"trained_reg_models\"\n+ reg_model_path = (\n+ log_path / \"trained_reg_models_out_sample\"\n+ if is_timeseries_split\n+ else log_path / \"trained_reg_models\"\n+ )\n+ reg_model_path.mkdir(exist_ok=True, parents=True)\nobd = OpenBanditDataset(\nbehavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n@@ -104,9 +122,16 @@ if __name__ == \"__main__\":\n# ground-truth policy value of a counterfactual policy\n# , which is estimated with factual (observed) rewards (on-policy estimation)\nground_truth_policy_value = OpenBanditDataset.calc_on_policy_policy_value_estimate(\n- behavior_policy=counterfactual_policy, campaign=campaign, data_path=data_path\n+ behavior_policy=counterfactual_policy,\n+ campaign=campaign,\n+ data_path=data_path,\n+ test_size=test_size,\n+ is_timeseries_split=is_timeseries_split,\n)\n+ ope_results = {\n+ est.estimator_name: np.zeros(n_boot_samples) for est in ope_estimators\n+ }\nevaluation_of_ope_results = {\nest.estimator_name: np.zeros(n_boot_samples) for est in ope_estimators\n}\n@@ -115,7 +140,9 @@ if __name__ == \"__main__\":\nwith open(reg_model_path / f\"reg_model_{b}.pkl\", \"rb\") as f:\nreg_model = pickle.load(f)\n# sample bootstrap from batch logged bandit feedback\n- boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(random_state=b)\n+ boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=is_timeseries_split, random_state=b\n+ )\n# run a counterfactual bandit algorithm on logged bandit feedback data\nselected_actions = run_bandit_simulation(\nbandit_feedback=boot_bandit_feedback, policy=policy\n@@ -127,12 +154,21 @@ if __name__ == \"__main__\":\nregression_model=reg_model,\nope_estimators=ope_estimators,\n)\n+ estimated_policy_values = ope.estimate_policy_values(\n+ selected_actions=selected_actions,\n+ )\nrelative_estimation_errors = ope.evaluate_performance_of_estimators(\nselected_actions=selected_actions,\nground_truth_policy_value=ground_truth_policy_value,\n)\npolicy.initialize()\n- # store relative estimation errors of OPE estimators at each split\n+ # store estimated policy values by OPE estimators at each bootstrap\n+ for (\n+ estimator_name,\n+ estimated_policy_value,\n+ ) in estimated_policy_values.items():\n+ ope_results[estimator_name][b] = estimated_policy_value\n+ # store relative estimation errors of OPE estimators at each 
bootstrap\nfor (\nestimator_name,\nrelative_estimation_error,\n@@ -140,15 +176,20 @@ if __name__ == \"__main__\":\nevaluation_of_ope_results[estimator_name][b] = relative_estimation_error\n# estimate confidence intervals of relative estimation by nonparametric bootstrap method\n+ ope_results_with_ci = {est.estimator_name: dict() for est in ope_estimators}\nevaluation_of_ope_results_with_ci = {\nest.estimator_name: dict() for est in ope_estimators\n}\n- for estimator_name in evaluation_of_ope_results_with_ci.keys():\n+ for estimator_name in ope_results_with_ci.keys():\n+ ope_results_with_ci[estimator_name] = estimate_confidence_interval_by_bootstrap(\n+ samples=ope_results[estimator_name], random_state=random_state\n+ )\nevaluation_of_ope_results_with_ci[\nestimator_name\n] = estimate_confidence_interval_by_bootstrap(\nsamples=evaluation_of_ope_results[estimator_name], random_state=random_state\n)\n+ ope_results_df = pd.DataFrame(ope_results_with_ci).T\nevaluation_of_ope_results_df = pd.DataFrame(evaluation_of_ope_results_with_ci).T\nprint(\"=\" * 50)\n@@ -158,4 +199,5 @@ if __name__ == \"__main__\":\nprint(\"=\" * 50)\n# save results of the evaluation of off-policy estimators in './logs' directory.\n+ ope_results_df.to_csv(log_path / f\"estimated_policy_values_by_ope_estimators.csv\")\nevaluation_of_ope_results_df.to_csv(log_path / f\"comparison_of_ope_estimators.csv\")\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/train_regression_model.py", "new_path": "benchmark/ope/train_regression_model.py", "diff": "@@ -55,6 +55,17 @@ if __name__ == \"__main__\":\nrequired=True,\nhelp=\"campaign name, men, women, or all.\",\n)\n+ parser.add_argument(\n+ \"--test_size\",\n+ type=float,\n+ default=0.3,\n+ help=\"the proportion of the dataset to include in the test split.\",\n+ )\n+ parser.add_argument(\n+ \"--is_timeseries_split\",\n+ action=\"store_true\",\n+ help=\"If true, split the original logged badnit feedback data by time series.\",\n+ )\nparser.add_argument(\"--random_state\", type=int, default=12345)\nargs = parser.parse_args()\nprint(args)\n@@ -64,11 +75,17 @@ if __name__ == \"__main__\":\nbase_model = args.base_model\nbehavior_policy = args.behavior_policy\ncampaign = args.campaign\n+ test_size = args.test_size\n+ is_timeseries_split = args.is_timeseries_split\nrandom_state = args.random_state\ndata_path = Path(\"../open_bandit_dataset\")\n# prepare path\nlog_path = Path(\"./logs\") / behavior_policy / campaign / base_model\n- reg_model_path = log_path / \"trained_reg_models\"\n+ reg_model_path = (\n+ log_path / \"trained_reg_models_out_sample\"\n+ if is_timeseries_split\n+ else log_path / \"trained_reg_models\"\n+ )\nreg_model_path.mkdir(exist_ok=True, parents=True)\nobd = OpenBanditDataset(\n@@ -87,7 +104,9 @@ if __name__ == \"__main__\":\n}\nfor b in np.arange(n_boot_samples):\n# sample bootstrap from batch logged bandit feedback\n- boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(random_state=b)\n+ boot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=is_timeseries_split, random_state=b\n+ )\n# train a regression model on logged bandit feedback data\nreg_model.fit(\ncontext=boot_bandit_feedback[\"context\"],\n@@ -97,11 +116,12 @@ if __name__ == \"__main__\":\naction_context=boot_bandit_feedback[\"action_context\"],\n)\n# evaluate the (in-sample) estimation performance of the regression model by AUC and RCE\n+ # TODO: out-sample?\npredicted_rewards = 
reg_model.predict(\ncontext=boot_bandit_feedback[\"context\"],\naction_context=boot_bandit_feedback[\"action_context\"],\nselected_actions=np.expand_dims(boot_bandit_feedback[\"action\"], 1),\n- position=np.zeros(boot_bandit_feedback[\"n_rounds\"], dtype=int),\n+ position=np.zeros_like(boot_bandit_feedback[\"action\"], int),\n)\nrewards = boot_bandit_feedback[\"reward\"]\nperformance_of_reg_model[\"auc\"][b] = roc_auc_score(\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "@@ -91,7 +91,12 @@ class OpenBanditDataset(BaseRealBanditDataset):\n@classmethod\ndef calc_on_policy_policy_value_estimate(\n- cls, behavior_policy: str, campaign: str, data_path: Path = Path(\"./obd\")\n+ cls,\n+ behavior_policy: str,\n+ campaign: str,\n+ data_path: Path = Path(\"./obd\"),\n+ test_size: float = 0.3,\n+ is_timeseries_split: bool = False,\n) -> float:\n\"\"\"Calculate on-policy policy value estimate (used as a ground-truth policy value).\n@@ -107,6 +112,12 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndata_path: Path, default: Path('./obd')\nPath that stores Open Bandit Dataset.\n+ test_size: float, default=0.3\n+ If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n+\n+ is_timeseries_split: bool, default: False\n+ If true, split the original logged badnit feedback data by time series.\n+\nReturns\n---------\non_policy_policy_value_estimate: float\n@@ -114,13 +125,20 @@ class OpenBanditDataset(BaseRealBanditDataset):\nThis parameter is used as a ground-truth policy value in the evaluation of OPE estimators.\n\"\"\"\n- return cls(\n- behavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n- ).reward.mean()\n+ return (\n+ cls(behavior_policy=behavior_policy, campaign=campaign, data_path=data_path)\n+ .obtain_batch_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=is_timeseries_split\n+ )[\"reward_test\"]\n+ .mean()\n+ )\ndef load_raw_data(self) -> None:\n\"\"\"Load raw open bandit dataset.\"\"\"\nself.data = pd.read_csv(self.data_path / self.raw_data_file, index_col=0)\n+ self.item_context = pd.read_csv(\n+ self.data_path / \"item_context.csv\", index_col=0\n+ )\nself.data.sort_values(\"timestamp\", inplace=True)\nself.action = self.data[\"item_id\"].values\nself.position = (rankdata(self.data[\"position\"].values, \"dense\") - 1).astype(\n@@ -130,38 +148,89 @@ class OpenBanditDataset(BaseRealBanditDataset):\nself.pscore = self.data[\"propensity_score\"].values\ndef pre_process(self) -> None:\n- \"\"\"Preprocess raw open bandit dataset.\"\"\"\n+ \"\"\"Preprocess raw open bandit dataset.\n+\n+ Note\n+ -----\n+ This is the default feature engineering and please overide this method to\n+ implement your own preprocessing.\n+ see https://github.com/st-tech/zr-obp/blob/master/examples/examples_with_obd/custom_dataset.py for example.\n+\n+ \"\"\"\nuser_cols = self.data.columns.str.contains(\"user_feature\")\nself.context = pd.get_dummies(\nself.data.loc[:, user_cols], drop_first=True\n).values\n- item_context = pd.read_csv(self.data_path / \"item_context.csv\", index_col=0)\n- item_feature_0 = item_context[\"item_feature_0\"]\n- item_feature_cat = item_context.drop(\"item_feature_0\", 1).apply(\n+ item_feature_0 = self.item_context[\"item_feature_0\"]\n+ item_feature_cat = self.item_context.drop(\"item_feature_0\", 1).apply(\nLabelEncoder().fit_transform\n)\nself.action_context = pd.concat([item_feature_cat, item_feature_0], 1).values\n- def 
obtain_batch_bandit_feedback(self) -> BanditFeedback:\n- \"\"\"Obtain batch logged bandit feedback.\"\"\"\n+ def obtain_batch_bandit_feedback(\n+ self, test_size: float = 0.3, is_timeseries_split: bool = False\n+ ) -> BanditFeedback:\n+ \"\"\"Obtain batch logged bandit feedback.\n+\n+ Parameters\n+ -----------\n+ test_size: float, default=0.3\n+ If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n+\n+ is_timeseries_split: bool, default: False\n+ If true, split the original logged badnit feedback data by time series.\n+\n+ Returns\n+ --------\n+ bandit_feedback: BanditFeedback\n+ Logged bandit feedback collected by the behavior policy.\n+\n+ \"\"\"\n+ if is_timeseries_split:\n+ assert isinstance(test_size, float) & (\n+ 0 < test_size < 1\n+ ), f\"test_size must be a float between 0 and 1, but {test_size} is given\"\n+ n_rounds_train = np.int(self.n_rounds * (1.0 - test_size))\n+ return dict(\n+ n_rounds=n_rounds_train,\n+ n_actions=self.n_actions,\n+ action=self.action[:n_rounds_train],\n+ position=self.position[:n_rounds_train],\n+ reward=self.reward[:n_rounds_train],\n+ reward_test=self.reward[n_rounds_train:],\n+ pscore=self.pscore[:n_rounds_train],\n+ context=self.context[:n_rounds_train],\n+ action_context=self.action_context,\n+ )\n+ else:\nreturn dict(\nn_rounds=self.n_rounds,\nn_actions=self.n_actions,\naction=self.action,\nposition=self.position,\nreward=self.reward,\n+ reward_test=self.reward,\npscore=self.pscore,\ncontext=self.context,\naction_context=self.action_context,\n)\ndef sample_bootstrap_bandit_feedback(\n- self, random_state: Optional[int] = None\n+ self,\n+ test_size: float = 0.3,\n+ is_timeseries_split: bool = False,\n+ random_state: Optional[int] = None,\n) -> BanditFeedback:\n\"\"\"Sample bootstrap logged bandit feedback.\nParameters\n-----------\n+ test_size: float, default=0.3\n+ If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n+\n+ is_timeseries_split: bool, default: False\n+ If true, split the original logged badnit feedback data by time series.\n+\nrandom_state: int, default: None\nControls the random seed in sampling logged bandit dataset.\n@@ -171,18 +240,12 @@ class OpenBanditDataset(BaseRealBanditDataset):\nBootstrapped logged bandit feedback independently sampled from the original data with replacement.\n\"\"\"\n- random_ = check_random_state(random_state)\n- bootstrap_idx = random_.choice(\n- np.arange(self.n_rounds), size=self.n_rounds, replace=True\n- )\n-\n- return dict(\n- n_rounds=self.n_rounds,\n- n_actions=self.n_actions,\n- action=self.action[bootstrap_idx],\n- position=self.position[bootstrap_idx],\n- reward=self.reward[bootstrap_idx],\n- pscore=self.pscore[bootstrap_idx],\n- context=self.context[bootstrap_idx, :],\n- action_context=self.action_context,\n+ bandit_feedback = self.obtain_batch_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=is_timeseries_split\n)\n+ n_rounds = bandit_feedback[\"n_rounds\"]\n+ random_ = check_random_state(random_state)\n+ bootstrap_idx = random_.choice(np.arange(n_rounds), size=n_rounds, replace=True)\n+ for key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\n+ bandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]\n+ return bandit_feedback\n" } ]
language: Python
license: Apache License 2.0
repo: st-tech/zr-obp
original_message: add time series split case to benchmark
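With `--is_timeseries_split`, the chronologically sorted log is cut once: the first `1 - test_size` fraction of rounds feeds the simulation and regression model, and the held-out tail supplies the on-policy ground-truth reward. A minimal sketch of that split with made-up arrays:

```python
import numpy as np

test_size = 0.3
n_rounds = 10
reward = np.arange(n_rounds)  # stand-in for rewards already sorted by timestamp

# Same split point as in the diff above (which uses the deprecated np.int).
n_rounds_train = int(n_rounds * (1.0 - test_size))
reward_train, reward_test = reward[:n_rounds_train], reward[n_rounds_train:]
print(reward_train.size, reward_test.mean())  # train size and on-policy estimate
```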
author: 641,014
date: 08.09.2020 15:56:02
timezone: -32,400
hash: 830ef065263e432f3fdbb293c5863df561f2e7aa
message: modify OPL and regression model to adjust to the list structure
mods:
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -15,7 +15,7 @@ import seaborn as sns\nfrom .estimators import BaseOffPolicyEstimator\nfrom .regression_model import RegressionModel\nfrom ..types import BanditFeedback\n-from ..utils import check_is_fitted, check_bandit_feedback_inputs\n+from ..utils import check_is_fitted\nlogger = getLogger(__name__)\n@@ -98,9 +98,6 @@ class OffPolicyEvaluation:\nfor key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\nif key_ not in self.bandit_feedback:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\n- self.n_rounds = self.bandit_feedback[\"action\"].shape[0]\n- self.n_actions = self.bandit_feedback[\"action\"].max() + 1\n- self.len_list = self.bandit_feedback[\"position\"].max() + 1\nif self.regression_model is not None:\nif check_is_fitted(self.regression_model.base_model):\n@@ -109,19 +106,13 @@ class OffPolicyEvaluation:\nlogger.info(\n\"the given regression model is not fitted, and thus train it here...\"\n)\n- check_bandit_feedback_inputs(\n- context=self.bandit_feedback[\"context\"],\n- action=self.bandit_feedback[\"action\"],\n- reward=self.bandit_feedback[\"reward\"],\n- pscore=self.bandit_feedback[\"pscore\"],\n- action_context=self.action_context,\n- )\nself.regression_model.fit(\ncontext=self.bandit_feedback[\"context\"],\naction=self.bandit_feedback[\"action\"],\nreward=self.bandit_feedback[\"reward\"],\npscore=self.bandit_feedback[\"pscore\"],\naction_context=self.action_context,\n+ position=self.bandit_feedback[\"position\"],\n)\nelse:\nlogger.warning(\n@@ -142,20 +133,8 @@ class OffPolicyEvaluation:\n}\nestimator_inputs[\"action_dist\"] = action_dist\nif self.regression_model is not None:\n- ones_n_rounds_arr = np.ones(self.n_rounds, int)\n- estimated_rewards_by_reg_model = np.zeros(\n- (self.n_rounds, self.n_actions, self.len_list)\n- )\n- for action_ in np.arange(self.n_actions):\n- for position_ in np.arange(self.len_list):\n- estimated_rewards_by_reg_model[\n- np.arange(self.n_rounds),\n- action_ * ones_n_rounds_arr,\n- position_ * ones_n_rounds_arr,\n- ] = self.regression_model.predict(\n+ estimated_rewards_by_reg_model = self.regression_model.predict(\ncontext=self.bandit_feedback[\"context\"],\n- action=action_ * ones_n_rounds_arr,\n- position=position_ * ones_n_rounds_arr,\naction_context=self.action_context,\n)\nestimator_inputs[\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -6,7 +6,9 @@ from dataclasses import dataclass\nfrom typing import Optional\nimport numpy as np\n-from sklearn.base import BaseEstimator, is_classifier\n+from sklearn.base import BaseEstimator, clone, is_classifier\n+\n+from ..utils import check_bandit_feedback_inputs\n@dataclass\n@@ -22,6 +24,13 @@ class RegressionModel:\nbase_model: BaseEstimator\nModel class to be used to predict the mean reward function.\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default: 1\n+ Length of a list of recommended actions in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\nfitting_method: str, default='normal'\nMethod to fit the regression method.\nMust be one of ['normal', 'iw', 'mrdr'] where 'iw' stands for importance weighting and\n@@ -35,6 +44,8 @@ class RegressionModel:\n\"\"\"\nbase_model: BaseEstimator\n+ n_actions: int\n+ len_list: int = 1\nfitting_method: str = \"normal\"\ndef __post_init__(self) -> None:\n@@ -44,6 +55,15 @@ class 
RegressionModel:\n\"iw\",\n\"mrdr\",\n], f\"fitting method must be one of 'normal', 'iw', or 'mrdr', but {self.fitting_method} is given\"\n+ assert self.n_actions > 1 and isinstance(\n+ self.n_actions, int\n+ ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ assert self.len_list > 0 and isinstance(\n+ self.len_list, int\n+ ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n+ self.base_model_list = [\n+ clone(self.base_model) for _ in np.arange(self.len_list)\n+ ]\ndef fit(\nself,\n@@ -52,6 +72,7 @@ class RegressionModel:\nreward: np.ndarray,\npscore: np.ndarray,\naction_context: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n) -> None:\n\"\"\"Fit the regression model on given logged bandit feedback data.\n@@ -73,75 +94,98 @@ class RegressionModel:\naction_context: array-like, shape (n_actions, dim_action_context)\nContext vector characterizing each action.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given training logged bandit feedback.\n+ If None is given, a learner assumes that there is only one position.\n+ When `len_list` > 1, position has to be set.\n+\n\"\"\"\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_context=action_context,\n+ )\n+ if position is None:\n+ assert self.len_list == 1, \"position has to be set when len_list is 1\"\n+ position = np.zeros_like(action)\n+ for position_ in np.arange(self.len_list):\n# create context vector to make predictions\nX = self._pre_process_for_reg_model(\n- context=context, action=action, action_context=action_context,\n+ context=context[position == position_],\n+ action=action[position == position_],\n+ action_context=action_context,\n)\n# train the base model according to the given `fitting method`\nif self.fitting_method == \"normal\":\n- self.base_model.fit(X, reward)\n+ self.base_model_list[position_].fit(X, reward[position == position_])\nelif self.fitting_method == \"iw\":\n- sample_weight = np.mean(pscore) / pscore\n- self.base_model.fit(X, reward, sample_weight=sample_weight)\n+ sample_weight = 1.0 / pscore[position == position_]\n+ self.base_model_list[position_].fit(\n+ X, reward[position == position_], sample_weight=sample_weight\n+ )\nelif self.fitting_method == \"mrdr\":\n- sample_weight = (1.0 - pscore) / pscore ** 2\n- self.base_model.fit(X, reward, sample_weight=sample_weight)\n+ sample_weight = (1.0 - pscore[position == position_]) / (\n+ pscore[position == position_] ** 2\n+ )\n+ self.base_model_list[position_].fit(\n+ X, reward[position == position_], sample_weight=sample_weight\n+ )\n- def predict(\n- self,\n- context: np.ndarray,\n- action_context: np.ndarray,\n- action: np.ndarray,\n- position: Optional[np.ndarray] = None,\n- ) -> np.ndarray:\n+ def predict(self, context: np.ndarray, action_context: np.ndarray,) -> np.ndarray:\n\"\"\"Predict the mean reward function.\nParameters\n-----------\n- context: array-like, shape (n_rounds, dim_context)\n- Context vectors in the given training logged bandit feedback.\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\naction_context: array-like, shape shape (n_actions, dim_action_context)\nContext vector characterizing each action.\n- action: array-like, shape (n_rounds,)\n- Selected actions by behavior policy in the given training logged bandit feedback.\n-\n- position: array-like, shape (n_rounds,), default=None\n- Positions of 
each round in the given training logged bandit feedback.\n-\nReturns\n-----------\n- estimated_rewards: array-like, shape (n_rounds, )\n- Estimated rewards by regression model for each round.\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Estimated rewards by regression model for new data.\n\"\"\"\n+ n_rounds_of_new_data = context.shape[0]\n+ ones_n_rounds_arr = np.ones(n_rounds_of_new_data, int)\n+ estimated_rewards_by_reg_model = np.zeros(\n+ (n_rounds_of_new_data, self.n_actions, self.len_list)\n+ )\n# create context vector to make predictions\n+ for action_ in np.arange(self.n_actions):\n+ for position_ in np.arange(self.len_list):\nX = self._pre_process_for_reg_model(\ncontext=context,\n- action=action,\n+ action=action_ * ones_n_rounds_arr,\naction_context=action_context,\n- position=position,\n)\n# make predictions\n- if is_classifier(self.base_model):\n- return self.base_model.predict_proba(X)[:, 1]\n- else:\n- return self.base_model.predict(X)\n+ estimated_rewards_ = (\n+ self.base_model_list[position_].predict_proba(X)[:, 1]\n+ if is_classifier(self.base_model_list[position_])\n+ else self.base_model_list[position_].predict(X)\n+ )\n+ estimated_rewards_by_reg_model[\n+ np.arange(n_rounds_of_new_data),\n+ action_ * ones_n_rounds_arr,\n+ position_ * ones_n_rounds_arr,\n+ ] = estimated_rewards_\n+ return estimated_rewards_by_reg_model\ndef _pre_process_for_reg_model(\n- self,\n- context: np.ndarray,\n- action_context: np.ndarray,\n- action: np.ndarray,\n- position: Optional[int] = None,\n+ self, context: np.ndarray, action: np.ndarray, action_context: np.ndarray,\n) -> np.ndarray:\n\"\"\"Preprocess feature vectors to train a give regression model.\nNote\n-----\n- Please override this method if you want to use another feature enginnering for training the regression model.\n+ Please override this method if you want to use another feature enginnering\n+ for training the regression model.\nParameters\n-----------\n@@ -151,9 +195,6 @@ class RegressionModel:\naction: array-like, shape (n_rounds,)\nSelected actions by behavior policy in the given training logged bandit feedback.\n- position: array-like, shape (n_rounds,), default=None\n- Positions of each round in the given training logged bandit feedback.\n-\naction_context: array-like, shape shape (n_actions, dim_action_context)\nContext vector characterizing each action.\n" }, { "change_type": "MODIFY", "old_path": "obp/policy/base.py", "new_path": "obp/policy/base.py", "diff": "@@ -7,7 +7,7 @@ from dataclasses import dataclass\nfrom typing import Optional, Tuple\nimport numpy as np\n-from sklearn.base import ClassifierMixin, is_classifier\n+from sklearn.base import clone, ClassifierMixin, is_classifier\nfrom sklearn.utils import check_random_state\nfrom ..utils import check_bandit_feedback_inputs\n@@ -175,8 +175,14 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nParameters\n-----------\nbase_model: ClassifierMixin\n- Machine learning classifier to be used to estimate the loss function\n- for learning the decision making policy.\n+ Machine learning classifier to be used to train an offline decision making policy.\n+\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default: 1\n+ Length of a list of recommended actions in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\nReference\n-----------\n@@ -186,10 +192,21 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\n\"\"\"\nbase_model: ClassifierMixin\n+ n_actions: int\n+ len_list: int = 1\ndef 
__post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\nassert is_classifier(self.base_model), \"base_model must be a classifier.\"\n+ assert self.n_actions > 1 and isinstance(\n+ self.n_actions, int\n+ ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ assert self.len_list > 0 and isinstance(\n+ self.len_list, int\n+ ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n+ self.base_model_list = [\n+ clone(self.base_model) for _ in np.arange(self.len_list)\n+ ]\n@property\ndef policy_type(self) -> str:\n@@ -202,7 +219,7 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\ncontext: np.ndarray,\naction: np.ndarray,\nreward: np.ndarray,\n- pscore: np.ndarray,\n+ pscore: Optional[np.ndarray] = None,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\"\"\"Create training data for off-policy learning.\n@@ -235,6 +252,7 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\naction: np.ndarray,\nreward: np.ndarray,\npscore: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n) -> None:\n\"\"\"Fits the offline bandit policy according to the given logged bandit feedback data.\n@@ -253,14 +271,33 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nPropensity scores, the probability of selecting each action by behavior policy,\nin the given training logged bandit feedback.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given training logged bandit feedback.\n+ If None is given, a learner assumes that there is only one position.\n+ When `len_list` > 1, position has to be set.\n+\n\"\"\"\ncheck_bandit_feedback_inputs(\n- context=context, action=action, reward=reward, pscore=pscore,\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n)\n+ if pscore is None:\n+ n_actions = np.int(action.max() + 1)\n+ pscore = np.ones_like(action) / n_actions\n+ if position is None:\n+ assert self.len_list == 1, \"position has to be set when len_list is 1\"\n+ position = np.zeros_like(action)\n+ for position_ in np.arange(self.len_list):\nX, sample_weight, y = self._create_train_data_for_opl(\n- context=context, action=action, reward=reward, pscore=pscore,\n+ context=context[position == position_],\n+ action=action[position == position_],\n+ reward=reward[position == position_],\n+ pscore=pscore[position == position_],\n)\n- self.base_model.fit(X=X, y=y, sample_weight=sample_weight)\n+ self.base_model_list[position_].fit(X=X, y=y, sample_weight=sample_weight)\ndef predict(self, context: np.ndarray) -> None:\n\"\"\"Predict best action for new data.\n@@ -272,16 +309,26 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nReturns\n-----------\n- pred: array-like, shape (n_rounds_of_new_data, 1)\n+ action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\nPredicted best action for new data.\n+ The resulting distribution is deterministic.\n\"\"\"\nassert (\nisinstance(context, np.ndarray) and context.ndim == 2\n), \"context must be 2-dimensional ndarray\"\n- predicted_actions = self.base_model.predict(context)\n-\n- return np.expand_dims(predicted_actions, 1)\n+ n_rounds_of_new_data = context.shape[0]\n+ action_dist = np.zeros((n_rounds_of_new_data, self.n_actions, self.len_list))\n+ for position_ in np.arange(self.len_list):\n+ predicted_actions_for_the_position = (\n+ self.base_model_list[position_].predict(context).astype(int)\n+ )\n+ action_dist[\n+ np.arange(n_rounds_of_new_data),\n+ predicted_actions_for_the_position,\n+ 
np.ones(n_rounds_of_new_data, dtype=int) * position_,\n+ ] = 1\n+ return action_dist\ndef predict_proba(self, context: np.ndarray) -> None:\n\"\"\"Predict probabilities of each action being the best one for new data.\n@@ -293,7 +340,7 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nReturns\n-----------\n- pred_proba: array-like, shape (n_rounds_of_new_data, n_actions)\n+ action_dist: array-like, shape (n_rounds_of_new_data, n_actions)\nProbability estimates of each arm being the best one for new data.\nThe returned estimates for all classes are ordered by the label of classes.\n@@ -301,5 +348,11 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nassert (\nisinstance(context, np.ndarray) and context.ndim == 2\n), \"context must be 2-dimensional ndarray\"\n-\n- return self.base_model.predict_proba(context)\n+ n_rounds_of_new_data = context.shape[0]\n+ action_dist = np.zeros((n_rounds_of_new_data, self.n_actions, self.len_list))\n+ for position_ in np.arange(self.len_list):\n+ predicted_probas_for_the_position = self.base_model_list[\n+ position_\n+ ].predict_proba(context)\n+ action_dist[:, :, position_] = predicted_probas_for_the_position\n+ return action_dist\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
modify OPL and regression model to adjust to the list structure
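The record above reshapes both the regression model and the off-policy learner around a three-dimensional `(n_rounds, n_actions, len_list)` action distribution, with one base model per recommendation slot. As a quick illustration (a standalone sketch with made-up sizes, not the library code itself), the deterministic distribution returned by `predict` can be built with the same fancy-indexing pattern that appears in the diff:

```python
import numpy as np

n_rounds, n_actions, len_list = 5, 4, 3
rng = np.random.default_rng(0)
# hypothetical per-position action choices, e.g. outputs of one classifier per slot
predicted_actions = rng.integers(n_actions, size=(len_list, n_rounds))

action_dist = np.zeros((n_rounds, n_actions, len_list))
for position_ in range(len_list):
    action_dist[
        np.arange(n_rounds),           # one entry per round
        predicted_actions[position_],  # action chosen for this slot
        position_,                     # fixed slot index
    ] = 1.0

# each (round, position) pair now selects exactly one action
assert np.allclose(action_dist.sum(axis=1), 1.0)
```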
641,014
08.09.2020 15:59:21
-32,400
880f240eb673add1a201469fbbaa102461dcb2d7
add benchmark to exclude
[ { "change_type": "MODIFY", "old_path": "setup.py", "new_path": "setup.py", "diff": "@@ -33,7 +33,7 @@ setup(\n\"tqdm>=4.41.1\",\n],\nlicense=\"Apache License\",\n- packages=find_packages(exclude=[\"obd\", \"examples\", \"tests\", \"docs\"]),\n+ packages=find_packages(exclude=[\"benchmark\", \"docs\", \"examples\", \"obd\", \"tests\"]),\nclassifiers=[\n\"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n\"Intended Audience :: Science/Research\",\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add benchmark to exclude
641,014
08.09.2020 23:07:57
-32,400
ff8b81b7ec43035734c348668f7b469f9b63151c
modify the usage of action_context in reg model
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -38,9 +38,6 @@ class OffPolicyEvaluation:\nList of OPE estimators used to evaluate the policy value of evaluation policy.\nEstimators must follow the interface of `obp.ope.BaseOffPolicyEstimator`.\n- action_context: array-like, shape (n_actions, dim_action_context), default: None\n- Context vectors used as input to predict the mean reward function.\n-\nregression_model: RegressionModel, default: None\nRegression model that predicts the mean reward function :math:`E[Y | X, A]`.\n@@ -90,7 +87,6 @@ class OffPolicyEvaluation:\nbandit_feedback: BanditFeedback\nope_estimators: List[BaseOffPolicyEstimator]\n- action_context: Optional[np.ndarray] = None\nregression_model: Optional[RegressionModel] = None\ndef __post_init__(self) -> None:\n@@ -111,7 +107,6 @@ class OffPolicyEvaluation:\naction=self.bandit_feedback[\"action\"],\nreward=self.bandit_feedback[\"reward\"],\npscore=self.bandit_feedback[\"pscore\"],\n- action_context=self.action_context,\nposition=self.bandit_feedback[\"position\"],\n)\nelse:\n@@ -134,8 +129,7 @@ class OffPolicyEvaluation:\nestimator_inputs[\"action_dist\"] = action_dist\nif self.regression_model is not None:\nestimated_rewards_by_reg_model = self.regression_model.predict(\n- context=self.bandit_feedback[\"context\"],\n- action_context=self.action_context,\n+ context=self.bandit_feedback[\"context\"]\n)\nestimator_inputs[\n\"estimated_rewards_by_reg_model\"\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -31,6 +31,9 @@ class RegressionModel:\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n+ action_context: array-like, shape (n_actions, dim_action_context), default=None\n+ Context vector characterizing each action.\n+\nfitting_method: str, default='normal'\nMethod to fit the regression method.\nMust be one of ['normal', 'iw', 'mrdr'] where 'iw' stands for importance weighting and\n@@ -46,6 +49,7 @@ class RegressionModel:\nbase_model: BaseEstimator\nn_actions: int\nlen_list: int = 1\n+ action_context: Optional[np.ndarray] = None\nfitting_method: str = \"normal\"\ndef __post_init__(self) -> None:\n@@ -71,7 +75,6 @@ class RegressionModel:\naction: np.ndarray,\nreward: np.ndarray,\npscore: np.ndarray,\n- action_context: np.ndarray,\nposition: Optional[np.ndarray] = None,\n) -> None:\n\"\"\"Fit the regression model on given logged bandit feedback data.\n@@ -91,9 +94,6 @@ class RegressionModel:\nPropensity scores, the probability of selecting each action by behavior policy,\nin the given training logged bandit feedback.\n- action_context: array-like, shape (n_actions, dim_action_context)\n- Context vector characterizing each action.\n-\nposition: array-like, shape (n_rounds,), default=None\nPositions of each round in the given training logged bandit feedback.\nIf None is given, a learner assumes that there is only one position.\n@@ -106,7 +106,7 @@ class RegressionModel:\nreward=reward,\npscore=pscore,\nposition=position,\n- action_context=action_context,\n+ action_context=self.action_context,\n)\nif position is None:\nassert self.len_list == 1, \"position has to be set when len_list is 1\"\n@@ -116,7 +116,7 @@ class RegressionModel:\nX = self._pre_process_for_reg_model(\ncontext=context[position == position_],\naction=action[position == position_],\n- action_context=action_context,\n+ action_context=self.action_context,\n)\n# 
train the base model according to the given `fitting method`\nif self.fitting_method == \"normal\":\n@@ -134,7 +134,7 @@ class RegressionModel:\nX, reward[position == position_], sample_weight=sample_weight\n)\n- def predict(self, context: np.ndarray, action_context: np.ndarray,) -> np.ndarray:\n+ def predict(self, context: np.ndarray) -> np.ndarray:\n\"\"\"Predict the mean reward function.\nParameters\n@@ -142,9 +142,6 @@ class RegressionModel:\ncontext: array-like, shape (n_rounds_of_new_data, dim_context)\nContext vectors for new data.\n- action_context: array-like, shape shape (n_actions, dim_action_context)\n- Context vector characterizing each action.\n-\nReturns\n-----------\nestimated_rewards_by_reg_model: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n@@ -162,7 +159,7 @@ class RegressionModel:\nX = self._pre_process_for_reg_model(\ncontext=context,\naction=action_ * ones_n_rounds_arr,\n- action_context=action_context,\n+ action_context=self.action_context,\n)\n# make predictions\nestimated_rewards_ = (\n@@ -178,7 +175,10 @@ class RegressionModel:\nreturn estimated_rewards_by_reg_model\ndef _pre_process_for_reg_model(\n- self, context: np.ndarray, action: np.ndarray, action_context: np.ndarray,\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ action_context: Optional[np.ndarray] = None,\n) -> np.ndarray:\n\"\"\"Preprocess feature vectors to train a give regression model.\n@@ -195,8 +195,11 @@ class RegressionModel:\naction: array-like, shape (n_rounds,)\nSelected actions by behavior policy in the given training logged bandit feedback.\n- action_context: array-like, shape shape (n_actions, dim_action_context)\n+ action_context: array-like, shape shape (n_actions, dim_action_context), default=None\nContext vector characterizing each action.\n\"\"\"\n+ if action_context is None:\n+ return context\n+ else:\nreturn np.c_[context, action_context[action]]\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
modify the usage of action_context in reg model
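After this refactor, `action_context` is supplied once when the `RegressionModel` is constructed, and `fit`/`predict` no longer take it as an argument. Below is a condensed usage sketch following the signatures shown in the diff; the base model settings are illustrative, and `bandit_feedback` is assumed to be the dict produced by `obtain_batch_bandit_feedback`.

```python
from sklearn.linear_model import LogisticRegression
from obp.ope import RegressionModel


def fit_and_predict(bandit_feedback, n_actions, len_list=3, action_context=None):
    reg_model = RegressionModel(
        base_model=LogisticRegression(max_iter=10000),  # illustrative choice
        n_actions=n_actions,
        len_list=len_list,
        action_context=action_context,  # stored on the model, not passed to fit/predict
    )
    # the new `fit` signature: action_context is no longer an argument here
    reg_model.fit(
        context=bandit_feedback["context"],
        action=bandit_feedback["action"],
        reward=bandit_feedback["reward"],
        pscore=bandit_feedback["pscore"],
        position=bandit_feedback["position"],
    )
    # `predict` only needs contexts and returns (n_rounds, n_actions, len_list)
    return reg_model.predict(context=bandit_feedback["context"])
```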
641,014
09.09.2020 00:43:21
-32,400
c00e324c9c95aef9086611a7e59235d8c8656bae
adjust to the change of reg model
[ { "change_type": "MODIFY", "old_path": "benchmark/ope/benchmark_off_policy_estimators.py", "new_path": "benchmark/ope/benchmark_off_policy_estimators.py", "diff": "@@ -8,7 +8,7 @@ import pandas as pd\nfrom obp.dataset import OpenBanditDataset\nfrom obp.simulator import run_bandit_simulation\n-from obp.policy import Random, BernoulliTS\n+from obp.policy import BernoulliTS\nfrom obp.ope import (\nOffPolicyEvaluation,\nInverseProbabilityWeighting,\n@@ -27,8 +27,6 @@ with open(\"./conf/prior_bts.yaml\", \"rb\") as f:\nwith open(\"./conf/batch_size_bts.yaml\", \"rb\") as f:\nproduction_batch_size_for_bts = yaml.safe_load(f)\n-counterfactual_policy_dict = dict(bts=BernoulliTS, random=Random)\n-\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\nparser.add_argument(\n@@ -44,13 +42,6 @@ if __name__ == \"__main__\":\nrequired=True,\nhelp=\"base ML model for regression model, logistic_regression or lightgbm.\",\n)\n- parser.add_argument(\n- \"--counterfactual_policy\",\n- type=str,\n- choices=[\"bts\", \"random\"],\n- required=True,\n- help=\"counterfactual policy, bts or random.\",\n- )\nparser.add_argument(\n\"--behavior_policy\",\ntype=str,\n@@ -83,15 +74,17 @@ if __name__ == \"__main__\":\n# configurations of the benchmark experiment\nn_boot_samples = args.n_boot_samples\nbase_model = args.base_model\n- counterfactual_policy = args.counterfactual_policy\nbehavior_policy = args.behavior_policy\n+ counterfactual_policy = \"bts\" if behavior_policy == \"random\" else \"random\"\ncampaign = args.campaign\ntest_size = args.test_size\nis_timeseries_split = args.is_timeseries_split\nrandom_state = args.random_state\ndata_path = Path(\"../open_bandit_dataset\")\n# prepare path\n- log_path = Path(\"./logs\") / behavior_policy / campaign / base_model\n+ log_path = (\n+ Path(\"./logs\") / behavior_policy / campaign / \"benchmark_of_ope\" / base_model\n+ )\nreg_model_path = (\nlog_path / \"trained_reg_models_out_sample\"\nif is_timeseries_split\n@@ -106,11 +99,9 @@ if __name__ == \"__main__\":\nkwargs = dict(\nn_actions=obd.n_actions, len_list=obd.len_list, random_state=random_state\n)\n- if counterfactual_policy == \"bts\":\nkwargs[\"alpha\"] = production_prior_for_bts[campaign][\"alpha\"]\nkwargs[\"beta\"] = production_prior_for_bts[campaign][\"beta\"]\nkwargs[\"batch_size\"] = production_batch_size_for_bts[campaign]\n- policy = counterfactual_policy_dict[counterfactual_policy](**kwargs)\n# compared OPE estimators\nope_estimators = [\nDirectMethod(),\n@@ -143,25 +134,28 @@ if __name__ == \"__main__\":\nboot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split, random_state=b\n)\n+ if counterfactual_policy == \"bts\":\n+ policy = BernoulliTS(**kwargs)\n# run a counterfactual bandit algorithm on logged bandit feedback data\n- selected_actions = run_bandit_simulation(\n+ action_dist = run_bandit_simulation(\nbandit_feedback=boot_bandit_feedback, policy=policy\n)\n+ else:\n+ # the random policy has uniformally random distribution over actions\n+ action_dist = np.ones((obd.n_rounds, obd.n_actions, obd.len_list)) * (\n+ 1 / obd.n_actions\n+ )\n# evaluate the estimation performance of OPE estimators\nope = OffPolicyEvaluation(\nbandit_feedback=boot_bandit_feedback,\n- action_context=obd.action_context,\nregression_model=reg_model,\nope_estimators=ope_estimators,\n)\n- estimated_policy_values = ope.estimate_policy_values(\n- selected_actions=selected_actions,\n- )\n+ 
estimated_policy_values = ope.estimate_policy_values(action_dist=action_dist,)\nrelative_estimation_errors = ope.evaluate_performance_of_estimators(\n- selected_actions=selected_actions,\n+ action_dist=action_dist,\nground_truth_policy_value=ground_truth_policy_value,\n)\n- policy.initialize()\n# store estimated policy values by OPE estimators at each bootstrap\nfor (\nestimator_name,\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/train_regression_model.py", "new_path": "benchmark/ope/train_regression_model.py", "diff": "@@ -17,7 +17,7 @@ from obp.ope import RegressionModel\nfrom obp.utils import estimate_confidence_interval_by_bootstrap\n-with open(\"./conf/hyperparam.yaml\", \"rb\") as f:\n+with open(\"./conf/hyperparams.yaml\", \"rb\") as f:\nhyperparams = yaml.safe_load(f)\nbase_model_dict = dict(\n@@ -80,7 +80,9 @@ if __name__ == \"__main__\":\nrandom_state = args.random_state\ndata_path = Path(\"../open_bandit_dataset\")\n# prepare path\n- log_path = Path(\"./logs\") / behavior_policy / campaign / base_model\n+ log_path = (\n+ Path(\"./logs\") / behavior_policy / campaign / \"benchmark_of_ope\" / base_model\n+ )\nreg_model_path = (\nlog_path / \"trained_reg_models_out_sample\"\nif is_timeseries_split\n@@ -91,13 +93,6 @@ if __name__ == \"__main__\":\nobd = OpenBanditDataset(\nbehavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n)\n- # a base ML model for regression model\n- reg_model = RegressionModel(\n- base_model=CalibratedClassifierCV(\n- base_model_dict[base_model](**hyperparams[base_model])\n- )\n- )\n-\nstart_time = time.time()\nperformance_of_reg_model = {\nmetrics[i]: np.zeros(n_boot_samples) for i in np.arange(len(metrics))\n@@ -107,31 +102,52 @@ if __name__ == \"__main__\":\nboot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split, random_state=b\n)\n+ # define a regression model\n+ reg_model = RegressionModel(\n+ n_actions=obd.n_actions,\n+ len_list=obd.len_list,\n+ action_context=boot_bandit_feedback[\"action_context\"],\n+ base_model=CalibratedClassifierCV(\n+ base_model_dict[base_model](**hyperparams[base_model])\n+ ),\n+ )\n# train a regression model on logged bandit feedback data\nreg_model.fit(\ncontext=boot_bandit_feedback[\"context\"],\naction=boot_bandit_feedback[\"action\"],\nreward=boot_bandit_feedback[\"reward\"],\npscore=boot_bandit_feedback[\"pscore\"],\n- action_context=boot_bandit_feedback[\"action_context\"],\n- )\n- # evaluate the (in-sample) estimation performance of the regression model by AUC and RCE\n- # TODO: out-sample?\n- predicted_rewards = reg_model.predict(\n+ position=boot_bandit_feedback[\"position\"],\n+ )\n+ # evaluate the estimation performance of the regression model by AUC and RCE\n+ if is_timeseries_split:\n+ estimated_reward_by_reg_model = reg_model.predict(\n+ context=boot_bandit_feedback[\"context_test\"],\n+ )\n+ rewards = boot_bandit_feedback[\"reward_test\"]\n+ estimated_rewards_ = estimated_reward_by_reg_model[\n+ np.arange(rewards.shape[0]),\n+ boot_bandit_feedback[\"action_test\"].astype(int),\n+ boot_bandit_feedback[\"position_test\"].astype(int),\n+ ]\n+ else:\n+ estimated_reward_by_reg_model = reg_model.predict(\ncontext=boot_bandit_feedback[\"context\"],\n- action_context=boot_bandit_feedback[\"action_context\"],\n- selected_actions=np.expand_dims(boot_bandit_feedback[\"action\"], 1),\n- position=np.zeros_like(boot_bandit_feedback[\"action\"], int),\n)\nrewards = boot_bandit_feedback[\"reward\"]\n+ estimated_rewards_ = 
estimated_reward_by_reg_model[\n+ np.arange(boot_bandit_feedback[\"n_rounds\"]),\n+ boot_bandit_feedback[\"action\"].astype(int),\n+ boot_bandit_feedback[\"position\"].astype(int),\n+ ]\nperformance_of_reg_model[\"auc\"][b] = roc_auc_score(\n- y_true=rewards, y_score=predicted_rewards\n+ y_true=rewards, y_score=estimated_rewards_\n)\n- rce_mean = log_loss(\n+ rce_mean = -log_loss(\ny_true=rewards, y_pred=np.ones_like(rewards) * np.mean(rewards)\n)\n- rce_clf = log_loss(y_true=rewards, y_pred=predicted_rewards)\n- performance_of_reg_model[\"rce\"][b] = (rce_mean - rce_clf) / rce_clf\n+ rce_clf = -log_loss(y_true=rewards, y_pred=estimated_rewards_)\n+ performance_of_reg_model[\"rce\"][b] = (rce_mean - rce_clf) / rce_mean\n# save trained regression model in a pickled form\npickle.dump(\n@@ -159,4 +175,8 @@ if __name__ == \"__main__\":\nprint(\"=\" * 50)\n# save performance of the regression model in './logs' directory.\n- performance_of_reg_model_df.to_csv(log_path / f\"performance_of_reg_model.csv\")\n+ performance_of_reg_model_df.to_csv(\n+ log_path / f\"performance_of_reg_model_out_sample.csv\"\n+ ) if is_timeseries_split else performance_of_reg_model_df.to_csv(\n+ log_path / f\"performance_of_reg_model_in_sample.csv\"\n+ )\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
adjust to the change of reg model
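The updated benchmark script scores the trained regression model with AUC and relative cross-entropy (RCE), where RCE measures how much the model improves the negative log-likelihood over a constant predictor that always outputs the empirical mean reward. A self-contained sketch of that computation with toy numbers:

```python
import numpy as np
from sklearn.metrics import log_loss, roc_auc_score

rewards = np.array([0, 1, 0, 0, 1, 1, 0, 1])                     # observed binary rewards
estimated_rewards_ = np.array([0.2, 0.7, 0.1, 0.4, 0.8, 0.6, 0.3, 0.9])  # model predictions

auc = roc_auc_score(y_true=rewards, y_score=estimated_rewards_)
# negative log-likelihood of the naive constant predictor (empirical mean reward)
rce_mean = -log_loss(y_true=rewards, y_pred=np.ones_like(rewards) * rewards.mean())
# negative log-likelihood of the regression model
rce_clf = -log_loss(y_true=rewards, y_pred=estimated_rewards_)
# improvement over the naive baseline, relative to the baseline
rce = (rce_mean - rce_clf) / rce_mean
print(auc, rce)
```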
641,014
09.09.2020 00:43:44
-32,400
d47dc07d9d4a81872d41684a4200a69f88fc494f
add cf policy search codes
[ { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/README.md", "diff": "+\n+\n+\n+### train regression model\n+\n+```\n+for model in logistic_regression lightgbm\n+do\n+ for pi_b in random\n+ do\n+ for camp in men women all\n+ do\n+ screen python train_regression_model.py\\\n+ --n_boot_samples 10\\\n+ --base_model $model\\\n+ --behavior_policy $pi_b\\\n+ --campaign $camp\n+ done\n+ done\n+done\n+```\n+\n+```\n+python train_regression_model.py --n_boot_samples 3 --base_model lightgbm --behavior_policy random --campaign men --is_timeseries_split\n+```\n+\n+\n+### benchmark off-policy estimators\n+\n+```\n+for model in logistic_regression lightgbm\n+do\n+ for pi_b in bts random\n+ do\n+ for camp in men women all\n+ do\n+ benchmark_off_policy_estimators.py\\\n+ --n_boot_samples 10\\\n+ --base_model $model\\\n+ --behavior_policy $pi_b\\\n+ --campaign $camp\\\n+ --is_timeseries_split\n+ done\n+ done\n+done\n+```\n+\n+```\n+python benchmark_off_policy_estimators.py --n_boot_samples 3 --base_model lightgbm --behavior_policy random --campaign men --is_timeseries_split\n+```\n+\n+\n+### run cf policy search\n+\n+```\n+for model in logistic_regression lightgbm random_forest\n+do\n+ for context in 1 2\n+ do\n+ for camp in men women all\n+ do\n+ screen python run_cf_policy_search.py\\\n+ --context_set $context\\\n+ --base_model $model\\\n+ --behavior_policy bts\\\n+ --campaign $camp\n+ done\n+ done\n+done\n+```\n+\n+```\n+python run_cf_policy_search.py --context_set 1 --base_model logistic_regression --campaign men\n+```\n" }, { "change_type": "DELETE", "old_path": "benchmark/ope/conf/hyperparam.yaml", "new_path": null, "diff": "-lightgbm:\n- max_iter: 1000\n- learning_rate: 0.03\n- min_samples_leaf: 5\n- random_state: 12345\n-logistic_regression:\n- max_iter: 10000\n- C: 1000\n- random_state: 12345\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/conf/hyperparams.yaml", "diff": "+lightgbm:\n+ max_iter: 300\n+ learning_rate: 0.05\n+ max_depth: 10\n+ min_samples_leaf: 10\n+ random_state: 12345\n+logistic_regression:\n+ max_iter: 10000\n+ C: 1000\n+ random_state: 12345\n+random_forest:\n+ n_estimators: 300\n+ max_depth: 10\n+ min_samples_leaf: 10\n+ random_state: 12345\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/custom_dataset.py", "diff": "+from dataclasses import dataclass\n+\n+import numpy as np\n+import pandas as pd\n+from sklearn.decomposition import PCA\n+\n+from obp.dataset import OpenBanditDataset\n+\n+\n+@dataclass\n+class OBDWithInteractionFeatures(OpenBanditDataset):\n+ context_set: str = \"1\"\n+\n+ def pre_process(self) -> None:\n+\n+ if self.context_set == \"1\":\n+ super().pre_process()\n+ elif self.context_set == \"2\":\n+ self._pre_process_context_set_2()\n+\n+ def _pre_process_context_set_1(self) -> None:\n+ \"\"\"Create Context Set 1 (c.f., Section 5.2)\"\"\"\n+\n+ user_cols = self.data.columns.str.contains(\"user_feature\")\n+ self.context = pd.get_dummies(\n+ self.data.loc[:, user_cols], drop_first=True\n+ ).values\n+\n+ def _pre_process_context_set_2(self) -> None:\n+ \"\"\"Create Context Set 2 (c.f., Section 5.2)\"\"\"\n+\n+ super().pre_process()\n+ affinity_cols = self.data.columns.str.contains(\"affinity\")\n+ Xaffinity = self.data.loc[:, affinity_cols].values\n+ self.context = PCA(n_components=30).fit_transform(\n+ np.c_[self.context, Xaffinity]\n+ )\n" }, { "change_type": "ADD", "old_path": null, "new_path": "benchmark/ope/run_cf_policy_search.py", "diff": "+import argparse\n+from pathlib import 
Path\n+import yaml\n+\n+import pandas as pd\n+import numpy as np\n+from sklearn.linear_model import LogisticRegression\n+from sklearn.ensemble import RandomForestClassifier\n+from sklearn.experimental import enable_hist_gradient_boosting\n+from sklearn.ensemble import HistGradientBoostingClassifier\n+\n+from custom_dataset import OBDWithInteractionFeatures\n+from obp.policy import IPWLearner\n+from obp.ope import InverseProbabilityWeighting\n+\n+# hyperparameter for the regression model used in model dependent OPE estimators\n+with open(\"./conf/hyperparams.yaml\", \"rb\") as f:\n+ hyperparams = yaml.safe_load(f)\n+\n+base_model_dict = dict(\n+ logistic_regression=LogisticRegression,\n+ lightgbm=HistGradientBoostingClassifier,\n+ random_forest=RandomForestClassifier,\n+)\n+\n+if __name__ == \"__main__\":\n+ parser = argparse.ArgumentParser(description=\"run counterfactual policy selection.\")\n+ parser.add_argument(\n+ \"--context_set\",\n+ type=str,\n+ choices=[\"1\", \"2\"],\n+ required=True,\n+ help=\"context sets for contextual bandit policies.\",\n+ )\n+ parser.add_argument(\n+ \"--base_model\",\n+ type=str,\n+ choices=[\"logistic_regression\", \"lightgbm\", \"random_forest\",],\n+ required=True,\n+ help=\"base model for a counterfactual policy to be evaluated\",\n+ )\n+ parser.add_argument(\n+ \"--behavior_policy\",\n+ type=str,\n+ choices=[\"bts\", \"random\"],\n+ default=\"random\",\n+ help=\"behavior policy, bts or random.\",\n+ )\n+ parser.add_argument(\n+ \"--campaign\",\n+ type=str,\n+ choices=[\"all\", \"men\", \"women\"],\n+ required=True,\n+ help=\"campaign name, men, women, or all.\",\n+ )\n+ parser.add_argument(\n+ \"--test_size\",\n+ type=float,\n+ default=0.3,\n+ help=\"the proportion of the dataset to include in the test split.\",\n+ )\n+ parser.add_argument(\"--random_state\", type=int, default=12345)\n+ args = parser.parse_args()\n+ print(args)\n+\n+ context_set = args.context_set\n+ base_model = args.base_model\n+ behavior_policy = args.behavior_policy\n+ campaign = args.campaign\n+ test_size = args.test_size\n+ random_state = args.random_state\n+ np.random.seed(random_state)\n+ data_path = Path(\"../open_bandit_dataset\")\n+\n+ # define a dataset class\n+ obd = OBDWithInteractionFeatures(\n+ behavior_policy=behavior_policy,\n+ campaign=campaign,\n+ data_path=data_path,\n+ context_set=context_set,\n+ )\n+ # define a counterfactual policy\n+ counterfactual_policy = IPWLearner(\n+ base_model=base_model_dict[base_model](**hyperparams[base_model]),\n+ n_actions=obd.n_actions,\n+ len_list=obd.len_list,\n+ )\n+ policy_name = f\"{base_model}_{context_set}\"\n+\n+ # obtain batch logged bandit feedback generated by behavior policy\n+ bandit_feedback = obd.obtain_batch_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=True,\n+ )\n+ # ground-truth policy value of the Bernoulli TS policy (the current best policy) in the test set\n+ # , which is the empirical mean of the factual (observed) rewards (on-policy estimation)\n+ ground_truth = obd.calc_on_policy_policy_value_estimate(\n+ behavior_policy=\"bts\",\n+ campaign=campaign,\n+ data_path=data_path,\n+ test_size=test_size,\n+ is_timeseries_split=True,\n+ )\n+\n+ # train an evaluation on the training set of the logged bandit feedback data\n+ action_dist = counterfactual_policy.fit(\n+ context=bandit_feedback[\"context\"],\n+ action=bandit_feedback[\"action\"],\n+ reward=bandit_feedback[\"reward\"],\n+ pscore=bandit_feedback[\"pscore\"],\n+ position=bandit_feedback[\"position\"],\n+ )\n+ # make predictions\n+ 
action_dist = counterfactual_policy.predict(context=bandit_feedback[\"context_test\"])\n+ # estimate the policy value of a given counterfactual algorithm by the three OPE estimators.\n+ ipw = InverseProbabilityWeighting()\n+ estimated_policy_value = ipw.estimate_policy_value(\n+ reward=bandit_feedback[\"reward_test\"],\n+ action=bandit_feedback[\"action_test\"],\n+ position=bandit_feedback[\"position_test\"],\n+ pscore=bandit_feedback[\"pscore_test\"],\n+ action_dist=action_dist,\n+ )\n+ estimated_interval = ipw.estimate_interval(\n+ reward=bandit_feedback[\"reward_test\"] / ground_truth,\n+ action=bandit_feedback[\"action_test\"],\n+ position=bandit_feedback[\"position_test\"],\n+ pscore=bandit_feedback[\"pscore_test\"],\n+ action_dist=action_dist,\n+ random_state=random_state,\n+ )\n+ ope_results_df = pd.DataFrame(estimated_interval, index=[\"ipw\"])\n+ ope_results_df[\"relative_estimated_policy_value\"] = (\n+ estimated_policy_value / ground_truth\n+ )\n+\n+ # calculate estimated policy value relative to that of the behavior policy\n+ print(\"=\" * 70)\n+ print(f\"random_state={random_state}: counterfactual policy={policy_name}\")\n+ print(\"-\" * 70)\n+ print(ope_results_df)\n+ print(\"=\" * 70)\n+\n+ # save counterfactual policy evaluation results in `./logs` directory\n+ save_path = Path(\"./logs\") / behavior_policy / campaign / \"cf_policy_selection\"\n+ save_path.mkdir(exist_ok=True, parents=True)\n+ ope_results_df.to_csv(save_path / f\"{policy_name}.csv\")\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add cf policy search codes
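The new `run_cf_policy_search.py` trains an `IPWLearner` on the training split of the logged data and scores its predictions on the held-out split with the IPW estimator. A condensed version of that loop, following the calls shown in the script above; the base model is an illustrative placeholder and `bandit_feedback` is assumed to come from `obtain_batch_bandit_feedback(..., is_timeseries_split=True)`.

```python
from sklearn.linear_model import LogisticRegression
from obp.policy import IPWLearner
from obp.ope import InverseProbabilityWeighting


def relative_ipw_value(bandit_feedback, n_actions, len_list, ground_truth):
    policy = IPWLearner(
        base_model=LogisticRegression(max_iter=10000),  # illustrative base classifier
        n_actions=n_actions,
        len_list=len_list,
    )
    # train on the training split of the logged bandit feedback
    policy.fit(
        context=bandit_feedback["context"],
        action=bandit_feedback["action"],
        reward=bandit_feedback["reward"],
        pscore=bandit_feedback["pscore"],
        position=bandit_feedback["position"],
    )
    # predict on the held-out (test) split
    action_dist = policy.predict(context=bandit_feedback["context_test"])
    # off-policy estimate of the learned policy's value via IPW
    ipw = InverseProbabilityWeighting()
    estimated_value = ipw.estimate_policy_value(
        reward=bandit_feedback["reward_test"],
        action=bandit_feedback["action_test"],
        position=bandit_feedback["position_test"],
        pscore=bandit_feedback["pscore_test"],
        action_dist=action_dist,
    )
    # report relative to the on-policy estimate of the baseline policy
    return estimated_value / ground_truth
```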
641,014
26.09.2020 11:50:21
-32,400
122ce001ef8cc726a33720235a2692049b92928b
update ope estimators
[ { "change_type": "MODIFY", "old_path": "obp/ope/estimators.py", "new_path": "obp/ope/estimators.py", "diff": "@@ -481,6 +481,7 @@ class DirectMethod(BaseOffPolicyEstimator):\ndef _estimate_round_rewards(\nself,\n+ position: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n**kwargs,\n@@ -489,6 +490,9 @@ class DirectMethod(BaseOffPolicyEstimator):\nParameters\n----------\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given training logged bandit feedback.\n+\naction_dist: array-like shape (n_rounds, n_actions, len_list)\nDistribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n@@ -501,12 +505,20 @@ class DirectMethod(BaseOffPolicyEstimator):\nRewards estimated by the DM estimator for each round.\n\"\"\"\n+ n_rounds = position.shape[0]\n+ estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), :, position\n+ ]\n+ action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\nreturn np.average(\n- estimated_rewards_by_reg_model, weights=action_dist, axis=(1, 2)\n+ estimated_rewards_by_reg_model_at_position,\n+ weights=action_dist_at_position,\n+ axis=1,\n)\ndef estimate_policy_value(\nself,\n+ position: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n**kwargs,\n@@ -515,6 +527,9 @@ class DirectMethod(BaseOffPolicyEstimator):\nParameters\n----------\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given training logged bandit feedback.\n+\naction_dist: array-like shape (n_rounds, n_actions, len_list)\nDistribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n@@ -528,12 +543,14 @@ class DirectMethod(BaseOffPolicyEstimator):\n\"\"\"\nreturn self._estimate_round_rewards(\n+ position=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\naction_dist=action_dist,\n).mean()\ndef estimate_interval(\nself,\n+ position: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nalpha: float = 0.05,\n@@ -545,6 +562,9 @@ class DirectMethod(BaseOffPolicyEstimator):\nParameters\n----------\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given training logged bandit feedback.\n+\naction_dist: array-like shape (n_rounds, n_actions, len_list)\nDistribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n@@ -567,6 +587,7 @@ class DirectMethod(BaseOffPolicyEstimator):\n\"\"\"\nestimated_round_rewards = self._estimate_round_rewards(\n+ position=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\naction_dist=action_dist,\n)\n@@ -658,8 +679,15 @@ class DoublyRobust(InverseProbabilityWeighting):\nRewards estimated by the DR estimator for each round.\n\"\"\"\n+ n_rounds = position.shape[0]\n+ estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), :, position\n+ ]\n+ action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\nround_rewards = np.average(\n- estimated_rewards_by_reg_model, weights=action_dist, axis=(1, 2)\n+ estimated_rewards_by_reg_model_at_position,\n+ weights=action_dist_at_position,\n+ axis=1,\n)\nimportance_weight = (\naction_dist[np.arange(action.shape[0]), action, position] / pscore\n@@ -860,8 
+888,15 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\nRewards estimated by the SNDR estimator for each round.\n\"\"\"\n+ n_rounds = position.shape[0]\n+ estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), :, position\n+ ]\n+ action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\nround_rewards = np.average(\n- estimated_rewards_by_reg_model, weights=action_dist, axis=(1, 2)\n+ estimated_rewards_by_reg_model_at_position,\n+ weights=action_dist_at_position,\n+ axis=1,\n)\nimportance_weight = (\naction_dist[np.arange(action.shape[0]), action, position] / pscore\n@@ -918,7 +953,7 @@ class SwitchDoublyRobust(DoublyRobust):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nassert (\n- self.tau >= 1.0\n+ self.tau >= 0.0\n), f\"switching hyperparameter should be larger than 1. but {self.tau} is given\"\ndef _estimate_round_rewards(\n@@ -960,8 +995,15 @@ class SwitchDoublyRobust(DoublyRobust):\nRewards estimated by the Switch-DR estimator for each round.\n\"\"\"\n+ n_rounds = position.shape[0]\n+ estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), :, position\n+ ]\n+ action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\nround_rewards = np.average(\n- estimated_rewards_by_reg_model, weights=action_dist, axis=(1, 2)\n+ estimated_rewards_by_reg_model_at_position,\n+ weights=action_dist_at_position,\n+ axis=1,\n)\nimportance_weight = (\naction_dist[np.arange(action.shape[0]), action, position] / pscore\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update ope estimators
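The estimator update above makes the model-based terms position-aware: for every round, the `(n_actions,)` slice of the reward model and of the action distribution that matches the observed position is selected, and the DM term is a weighted average over actions. A minimal numpy sketch of that indexing with illustrative sizes:

```python
import numpy as np

n_rounds, n_actions, len_list = 6, 4, 3
rng = np.random.default_rng(1)
estimated_rewards_by_reg_model = rng.random((n_rounds, n_actions, len_list))
# toy evaluation-policy distribution, normalized over the action axis
action_dist = rng.random((n_rounds, n_actions, len_list))
action_dist /= action_dist.sum(axis=1, keepdims=True)
position = rng.integers(len_list, size=n_rounds)

# pick, for every round, the slice that matches the observed position
rewards_at_position = estimated_rewards_by_reg_model[np.arange(n_rounds), :, position]
dist_at_position = action_dist[np.arange(n_rounds), :, position]  # (n_rounds, n_actions)
# model-based reward of the evaluation policy for each round (the DM term)
round_rewards = np.average(rewards_at_position, weights=dist_at_position, axis=1)
print(round_rewards.shape)  # (6,)
```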
641,014
01.10.2020 03:57:34
-32,400
7c6fd39463a09b857eb770af8b8e13d84297b263
add `is_zozotown_prior` to BernoulliTS
[ { "change_type": "MODIFY", "old_path": "benchmark/cf_policy_search/README.md", "new_path": "benchmark/cf_policy_search/README.md", "diff": "-### run cf policy search\n+# Counterfactual Policy Search\n+\n+## Description\n+\n+## Running Counterfactual Policy Search\n```\nfor model in lightgbm\n@@ -20,3 +24,5 @@ done\n```\npython run_cf_policy_search.py --context_set 1 --base_model logistic_regression --campaign men --n_boot_samples 2 --test_size 0.9\n```\n+\n+## Results\n" }, { "change_type": "MODIFY", "old_path": "benchmark/cf_policy_search/run_cf_policy_search.py", "new_path": "benchmark/cf_policy_search/run_cf_policy_search.py", "diff": "@@ -26,7 +26,7 @@ base_model_dict = dict(\n)\nif __name__ == \"__main__\":\n- parser = argparse.ArgumentParser(description=\"run counterfactual policy selection.\")\n+ parser = argparse.ArgumentParser(description=\"run evaluation policy selection.\")\nparser.add_argument(\n\"--n_boot_samples\",\ntype=int,\n@@ -45,7 +45,7 @@ if __name__ == \"__main__\":\ntype=str,\nchoices=[\"logistic_regression\", \"lightgbm\", \"random_forest\"],\nrequired=True,\n- help=\"base model for a counterfactual policy to be evaluated\",\n+ help=\"base model for a evaluation policy to be evaluated\",\n)\nparser.add_argument(\n\"--behavior_policy\",\n@@ -89,8 +89,8 @@ if __name__ == \"__main__\":\ndata_path=data_path,\ncontext_set=context_set,\n)\n- # define a counterfactual policy\n- counterfactual_policy = IPWLearner(\n+ # define a evaluation policy\n+ evaluation_policy = IPWLearner(\nbase_model=base_model_dict[base_model](**hyperparams[base_model]),\nn_actions=obd.n_actions,\nlen_list=obd.len_list,\n@@ -115,7 +115,7 @@ if __name__ == \"__main__\":\ntest_size=test_size, is_timeseries_split=True, random_state=b\n)\n# train an evaluation on the training set of the logged bandit feedback data\n- action_dist = counterfactual_policy.fit(\n+ action_dist = evaluation_policy.fit(\ncontext=boot_bandit_feedback[\"context\"],\naction=boot_bandit_feedback[\"action\"],\nreward=boot_bandit_feedback[\"reward\"],\n@@ -123,7 +123,7 @@ if __name__ == \"__main__\":\nposition=boot_bandit_feedback[\"position\"],\n)\n# make action selections (predictions)\n- action_dist = counterfactual_policy.predict(\n+ action_dist = evaluation_policy.predict(\ncontext=boot_bandit_feedback[\"context_test\"]\n)\n# estimate the policy value of a given counterfactual algorithm by the three OPE estimators.\n@@ -149,12 +149,12 @@ if __name__ == \"__main__\":\n# calculate estimated policy value relative to that of the behavior policy\nprint(\"=\" * 70)\n- print(f\"random_state={random_state}: counterfactual policy={policy_name}\")\n+ print(f\"random_state={random_state}: evaluation policy={policy_name}\")\nprint(\"-\" * 70)\nprint(ope_results_df)\nprint(\"=\" * 70)\n- # save counterfactual policy evaluation results in `./logs` directory\n+ # save evaluation policy evaluation results in `./logs` directory\nsave_path = Path(\"./logs\") / behavior_policy / campaign\nsave_path.mkdir(exist_ok=True, parents=True)\nope_results_df.to_csv(save_path / f\"{policy_name}.csv\")\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/README.md", "new_path": "benchmark/ope/README.md", "diff": "+# Benchmarking Off-Policy Evaluation\n+## Description\n-### train regression model\n+## Training Regression Model\n```\nfor model in logistic_regression\ndo\n- for pi_b in bts\n+ for pi_b in random\ndo\n- for camp in men women all\n+ for camp in men\ndo\n- screen python train_regression_model.py\\\n+ python 
train_regression_model.py\\\n--n_boot_samples 5\\\n--base_model $model\\\n--behavior_policy $pi_b\\\n@@ -20,21 +22,17 @@ do\ndone\n```\n-```\n-python train_regression_model.py --n_boot_samples 3 --base_model lightgbm --behavior_policy random --campaign men --is_timeseries_split\n-```\n-\n-### benchmark off-policy estimators\n+## Evaluating Off-Policy Estimators\n```\nfor model in logistic_regression\ndo\nfor pi_b in random\ndo\n- for camp in all men women\n+ for camp in men\ndo\n- screen python benchmark_off_policy_estimators.py\\\n+ python benchmark_off_policy_estimators.py\\\n--n_boot_samples 5\\\n--base_model $model\\\n--behavior_policy $pi_b\\\n@@ -44,6 +42,4 @@ do\ndone\n```\n-```\n-python benchmark_off_policy_estimators.py --n_boot_samples 3 --base_model lightgbm --behavior_policy random --campaign men --is_timeseries_split\n-```\n+## Results\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/benchmark_off_policy_estimators.py", "new_path": "benchmark/ope/benchmark_off_policy_estimators.py", "diff": "@@ -2,13 +2,12 @@ import argparse\nimport pickle\nimport time\nfrom pathlib import Path\n-import yaml\nimport numpy as np\nimport pandas as pd\nfrom obp.dataset import OpenBanditDataset\n-from obp.policy import BernoulliTS\n+from obp.policy import BernoulliTS, Random\nfrom obp.ope import (\nOffPolicyEvaluation,\nInverseProbabilityWeighting,\n@@ -17,12 +16,6 @@ from obp.ope import (\nDirectMethod,\nDoublyRobust,\n)\n-from obp.utils import estimate_confidence_interval_by_bootstrap\n-\n-# configurations to reproduce the Bernoulli Thompson Sampling policy\n-# used in ZOZOTOWN production\n-with open(\"./conf/prior_bts.yaml\", \"rb\") as f:\n- production_prior_for_bts = yaml.safe_load(f)\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\n@@ -70,6 +63,12 @@ if __name__ == \"__main__\":\naction=\"store_true\",\nhelp=\"If true, split the original logged badnit feedback data by time series.\",\n)\n+ parser.add_argument(\n+ \"--n_sim_to_compute_action_dist\",\n+ type=float,\n+ default=1000000,\n+ help=\"number of monte carlo simulation to compute the action distribution of bts.\",\n+ )\nparser.add_argument(\"--random_state\", type=int, default=12345)\nargs = parser.parse_args()\nprint(args)\n@@ -78,11 +77,12 @@ if __name__ == \"__main__\":\nn_boot_samples = args.n_boot_samples\nbase_model = args.base_model\nbehavior_policy = args.behavior_policy\n- counterfactual_policy = \"bts\" if behavior_policy == \"random\" else \"random\"\n+ evaluation_policy = \"bts\" if behavior_policy == \"random\" else \"random\"\ncampaign = args.campaign\nn_sim_for_action_dist = args.n_sim_for_action_dist\ntest_size = args.test_size\nis_timeseries_split = args.is_timeseries_split\n+ n_sim_to_compute_action_dist = args.n_sim_to_compute_action_dist\nrandom_state = args.random_state\ndata_path = Path(\"../open_bandit_dataset\")\n# prepare path\n@@ -97,12 +97,6 @@ if __name__ == \"__main__\":\nobd = OpenBanditDataset(\nbehavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n)\n- # hyparparameters for counterfactual policies\n- kwargs = dict(\n- n_actions=obd.n_actions, len_list=obd.len_list, random_state=random_state\n- )\n- kwargs[\"alpha\"] = production_prior_for_bts[campaign][\"alpha\"]\n- kwargs[\"beta\"] = production_prior_for_bts[campaign][\"beta\"]\n# compared OPE estimators\nope_estimators = [\nDirectMethod(),\n@@ -114,23 +108,20 @@ if __name__ == \"__main__\":\nSwitchDoublyRobust(tau=10, 
estimator_name=\"switch-dr(10)\"),\nSwitchDoublyRobust(tau=100, estimator_name=\"switch-dr(100)\"),\n]\n- # ground-truth policy value of a counterfactual policy\n+ # ground-truth policy value of a evaluation policy\n# , which is estimated with factual (observed) rewards (on-policy estimation)\nground_truth_policy_value = OpenBanditDataset.calc_on_policy_policy_value_estimate(\n- behavior_policy=counterfactual_policy,\n+ behavior_policy=evaluation_policy,\ncampaign=campaign,\ndata_path=data_path,\ntest_size=test_size,\nis_timeseries_split=is_timeseries_split,\n)\n- ope_results = {\n- est.estimator_name: np.zeros(n_boot_samples) for est in ope_estimators\n- }\n- evaluation_of_ope_results = {\n+ start = time.time()\n+ relative_ee = {\nest.estimator_name: np.zeros(n_boot_samples) for est in ope_estimators\n}\n- start = time.time()\nfor b in np.arange(n_boot_samples):\n# load the pre-trained regression model\nwith open(reg_model_path / f\"reg_model_{b}.pkl\", \"rb\") as f:\n@@ -143,22 +134,26 @@ if __name__ == \"__main__\":\n)\nfor key_ in [\"context\", \"action\", \"reward\", \"pscore\", \"position\"]:\nboot_bandit_feedback[key_] = boot_bandit_feedback[key_][~is_for_reg_model]\n- if counterfactual_policy == \"bts\":\n- policy = BernoulliTS(**kwargs)\n- action_count = np.zeros((policy.n_actions, policy.len_list))\n- for _ in np.arange(n_sim_for_action_dist):\n- selected_actions = policy.select_action()\n- for pos in np.arange(policy.len_list):\n- action_count[selected_actions[pos], pos] += 1\n- action_dist = np.tile(\n- action_count / n_sim_for_action_dist,\n- (boot_bandit_feedback[\"n_rounds\"], 1, 1),\n+ if evaluation_policy == \"bts\":\n+ policy = BernoulliTS(\n+ n_actions=obd.n_actions,\n+ len_list=obd.len_list,\n+ is_zozotown_prior=True, # replicate the policy in the ZOZOTOWN production\n+ campaign=campaign,\n+ random_state=random_state,\n+ )\n+ action_dist = policy.compute_batch_action_dist(\n+ n_sim=100000, n_rounds=boot_bandit_feedback[\"n_rounds\"]\n)\nelse:\n- # the random policy has uniformally random distribution over actions\n- action_dist = np.ones(\n- (boot_bandit_feedback[\"n_rounds\"], obd.n_actions, obd.len_list)\n- ) * (1 / obd.n_actions)\n+ policy = Random(\n+ n_actions=obd.n_actions,\n+ len_list=obd.len_list,\n+ random_state=random_state,\n+ )\n+ action_dist = policy.compute_batch_action_dist(\n+ n_sim=100000, n_rounds=boot_bandit_feedback[\"n_rounds\"]\n+ )\n# evaluate the estimation performance of OPE estimators\nope = OffPolicyEvaluation(\nbandit_feedback=boot_bandit_feedback,\n@@ -170,44 +165,26 @@ if __name__ == \"__main__\":\naction_dist=action_dist,\nground_truth_policy_value=ground_truth_policy_value,\n)\n- # store estimated policy values by OPE estimators at each bootstrap\n- for (\n- estimator_name,\n- estimated_policy_value,\n- ) in estimated_policy_values.items():\n- ope_results[estimator_name][b] = estimated_policy_value\n# store relative estimation errors of OPE estimators at each bootstrap\nfor (\nestimator_name,\nrelative_estimation_error,\n) in relative_estimation_errors.items():\n- evaluation_of_ope_results[estimator_name][b] = relative_estimation_error\n+ relative_ee[estimator_name][b] = relative_estimation_error\nprint(f\"{b+1}th iteration: {np.round((time.time() - start) / 60, 2)}min\")\n# estimate confidence intervals of relative estimation by nonparametric bootstrap method\n- ope_results_with_ci = {est.estimator_name: dict() for est in ope_estimators}\n- evaluation_of_ope_results_with_ci = {\n- est.estimator_name: dict() for est in ope_estimators\n- 
}\n- for estimator_name in ope_results_with_ci.keys():\n- ope_results_with_ci[estimator_name] = estimate_confidence_interval_by_bootstrap(\n- samples=ope_results[estimator_name], random_state=random_state\n- )\n- evaluation_of_ope_results_with_ci[\n+ evaluation_of_ope_results = {est.estimator_name: dict() for est in ope_estimators}\n+ for estimator_name in evaluation_of_ope_results.keys():\n+ evaluation_of_ope_results[estimator_name][\"mean\"] = relative_ee[\nestimator_name\n- ] = estimate_confidence_interval_by_bootstrap(\n- samples=evaluation_of_ope_results[estimator_name], random_state=random_state\n- )\n- evaluation_of_ope_results_with_ci[estimator_name][\n- \"mean(no-boot)\"\n- ] = evaluation_of_ope_results[estimator_name].mean()\n- evaluation_of_ope_results_with_ci[estimator_name][\"std\"] = np.std(\n- evaluation_of_ope_results[estimator_name], ddof=1\n+ ].mean()\n+ evaluation_of_ope_results[estimator_name][\"std\"] = np.std(\n+ relative_ee[estimator_name], ddof=1\n)\n- ope_results_df = pd.DataFrame(ope_results_with_ci).T\n- evaluation_of_ope_results_df = pd.DataFrame(evaluation_of_ope_results_with_ci).T\n+ evaluation_of_ope_results_df = pd.DataFrame(evaluation_of_ope_results).T\nprint(\"=\" * 50)\nprint(f\"random_state={random_state}\")\nprint(\"-\" * 50)\n@@ -215,6 +192,5 @@ if __name__ == \"__main__\":\nprint(\"=\" * 50)\n# save results of the evaluation of off-policy estimators in './logs' directory.\n- ope_results_df.to_csv(log_path / f\"estimated_policy_values_by_ope_estimators.csv\")\nevaluation_of_ope_results_df.to_csv(log_path / f\"relative_ee_of_ope_estimators.csv\")\n" }, { "change_type": "DELETE", "old_path": "benchmark/ope/conf/batch_size_bts.yaml", "new_path": null, "diff": "-all: 1800\n-men: 3200\n-women: 4850\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/train_regression_model.py", "new_path": "benchmark/ope/train_regression_model.py", "diff": "import time\nimport argparse\n-from pathlib import Path\nimport yaml\nimport pickle\n+from pathlib import Path\nimport numpy as np\nimport pandas as pd\n" }, { "change_type": "RENAME", "old_path": "benchmark/ope/conf/prior_bts.yaml", "new_path": "obp/policy/conf/prior_bts.yaml", "diff": "" }, { "change_type": "MODIFY", "old_path": "obp/policy/contextfree.py", "new_path": "obp/policy/contextfree.py", "diff": "# Licensed under the Apache 2.0 License.\n\"\"\"Context-Free Bandit Algorithms.\"\"\"\n+import os\n+\n+# import pkg_resources\n+import yaml\nfrom dataclasses import dataclass\nfrom typing import Optional\n@@ -10,6 +14,13 @@ import numpy as np\nfrom .base import BaseContextFreePolicy\n+# configurations to replicate the Bernoulli Thompson Sampling policy used in ZOZOTOWN production\n+prior_bts_file = os.path.join(os.path.dirname(__file__), \"conf\", \"prior_bts.yaml\")\n+# prior_bts_file = pkg_resources.resource_stream(__name__, \"data/emperors.csv\")\n+with open(prior_bts_file, \"rb\") as f:\n+ production_prior_for_bts = yaml.safe_load(f)\n+\n+\n@dataclass\nclass EpsilonGreedy(BaseContextFreePolicy):\n\"\"\"Epsilon Greedy policy.\n@@ -114,6 +125,28 @@ class Random(EpsilonGreedy):\npolicy_name: str = \"random\"\n+ def compute_batch_action_dist(\n+ self, n_rounds: int = 1, n_sim: int = 100000,\n+ ) -> np.ndarray:\n+ \"\"\"Compute the distribution over actions by Monte Carlo simulation.\n+\n+ Parameters\n+ ----------\n+ n_rounds: int, default: 1\n+ Number of rounds in the distribution over actions.\n+ (the size of the first axis of `action_dist`)\n+\n+ Returns\n+ ----------\n+ action_dist: array-like, shape 
(n_rounds, n_actions, len_list)\n+ Probability estimates of each arm being the best one for each sample, action, and position.\n+\n+ \"\"\"\n+ action_dist = np.ones((n_rounds, self.n_actions, self.len_list)) * (\n+ 1 / self.n_actions\n+ )\n+ return action_dist\n+\n@dataclass\nclass BernoulliTS(BaseContextFreePolicy):\n@@ -140,6 +173,13 @@ class BernoulliTS(BaseContextFreePolicy):\nbeta: array-like, shape (n_actions, ), default: None\nPrior parameter vector for Beta distributions.\n+ is_zozotown_prior: bool, default: False\n+ Whether to use hyperparameters for the beta distribution used\n+ at the start of the data collection period in ZOZOTOWN.\n+\n+ campaign: str, default: None\n+ One of the three possible campaigns, \"all\", \"men\", and \"women\".\n+\npolicy_name: str, default: 'bts'\nName of bandit policy.\n@@ -147,11 +187,20 @@ class BernoulliTS(BaseContextFreePolicy):\nalpha: Optional[np.ndarray] = None\nbeta: Optional[np.ndarray] = None\n+ is_zozotown_prior: bool = False\n+ campaign: Optional[str] = None\npolicy_name: str = \"bts\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\nsuper().__post_init__()\n+ if self.is_zozotown_prior:\n+ assert (\n+ self.campaign is not None\n+ ), \"`campaign` must be specified when `is_zozotown_prior` is True.\"\n+ self.alpha = production_prior_for_bts[self.campaign][\"alpha\"]\n+ self.beta = production_prior_for_bts[self.campaign][\"beta\"]\n+ else:\nself.alpha = np.ones(self.n_actions) if self.alpha is None else self.alpha\nself.beta = np.ones(self.n_actions) if self.beta is None else self.beta\n@@ -188,3 +237,31 @@ class BernoulliTS(BaseContextFreePolicy):\nif self.n_trial % self.batch_size == 0:\nself.action_counts = np.copy(self.action_counts_temp)\nself.reward_counts = np.copy(self.reward_counts_temp)\n+\n+ def compute_batch_action_dist(\n+ self, n_rounds: int = 1, n_sim: int = 100000,\n+ ) -> np.ndarray:\n+ \"\"\"Compute the distribution over actions by Monte Carlo simulation.\n+\n+ Parameters\n+ ----------\n+ n_rounds: int, default: 1\n+ Number of rounds in the distribution over actions.\n+ (the size of the first axis of `action_dist`)\n+\n+ n_sim: int, default: 100000\n+ Number of simulations in the Monte Carlo simulation to compute the distribution over actions.\n+\n+ Returns\n+ ----------\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Probability estimates of each arm being the best one for each sample, action, and position.\n+\n+ \"\"\"\n+ action_count = np.zeros((self.n_actions, self.len_list))\n+ for _ in np.arange(n_sim):\n+ selected_actions = self.select_action()\n+ for pos in np.arange(self.len_list):\n+ action_count[selected_actions[pos], pos] += 1\n+ action_dist = np.tile(action_count / n_sim, (n_rounds, 1, 1),)\n+ return action_dist\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add `is_zozotown_prior` to BernoulliTS
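With `is_zozotown_prior`, the production priors are loaded from the packaged YAML file, and `BernoulliTS` (as well as `Random`) now exposes `compute_batch_action_dist` so the action distribution can be materialized without hand-rolling a simulation loop. A usage sketch, assuming the default data path resolves to the small sample of the Open Bandit Dataset bundled with the repository:

```python
from obp.dataset import OpenBanditDataset
from obp.policy import BernoulliTS

dataset = OpenBanditDataset(behavior_policy="random", campaign="all")
policy = BernoulliTS(
    n_actions=dataset.n_actions,
    len_list=dataset.len_list,
    is_zozotown_prior=True,  # load the ZOZOTOWN production priors from the packaged YAML
    campaign="all",
    random_state=12345,
)
# Monte Carlo estimate of the slot-wise action choice probabilities,
# tiled over the requested number of rounds
action_dist = policy.compute_batch_action_dist(n_rounds=dataset.n_rounds, n_sim=100000)
print(action_dist.shape)  # (n_rounds, n_actions, len_list)
```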
641,014
01.10.2020 14:06:58
-32,400
6371b0af98e810b1be74d406bbfd349da1fd3960
update docs of meta.py
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -50,38 +50,37 @@ class OffPolicyEvaluation:\n# using log data generated by the Random policy\n>>> from obp.dataset import OpenBanditDataset\n>>> from obp.policy import BernoulliTS\n- >>> from obp.simulator import run_bandit_simulation\n- >>> from obp.ope import OffPolicyEvaluation, ReplayMethod\n+ >>> from obp.ope import OffPolicyEvaluation, Inverse Probability Weighting as IPW\n# (1) Data loading and preprocessing\n- >>> dataset = OpenBanditDataset(behavior_policy='random', campaign='women')\n+ >>> dataset = OpenBanditDataset(behavior_policy='random', campaign='all')\n>>> bandit_feedback = dataset.obtain_batch_bandit_feedback()\n>>> bandit_feedback.keys()\ndict_keys(['n_rounds', 'n_actions', 'action', 'position', 'reward', 'pscore', 'context', 'action_context'])\n# (2) Offline Bandit Simulation\n- >>> counterfactual_policy = BernoulliTS(n_actions=dataset.n_actions, len_list=dataset.len_list)\n- >>> selected_actions = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=counterfactual_policy)\n- >>> selected_actions\n- array([[24, 14, 30],\n- [30, 27, 24],\n- [14, 12, 32],\n- ...,\n- [17, 13, 30],\n- [20, 34, 6],\n- [30, 3, 20]])\n+ >>> evaluation_policy = BernoulliTS(\n+ n_actions=dataset.n_actions,\n+ len_list=dataset.len_list,\n+ is_zozotown_prior=True, # replicate the policy in the ZOZOTOWN production\n+ campaign=\"all\",\n+ random_state=12345\n+ )\n+ >>> action_dist = evaluation_policy.compute_batch_action_dist(\n+ n_sim=100000, n_rounds=bandit_feedback[\"n_rounds\"]\n+ )\n# (3) Off-Policy Evaluation\n- >>> ope = OffPolicyEvaluation(bandit_feedback=bandit_feedback, ope_estimators=[ReplayMethod()])\n- >>> estimated_policy_value = ope.estimate_policy_values(selected_actions=selected_actions)\n+ >>> ope = OffPolicyEvaluation(bandit_feedback=bandit_feedback, ope_estimators=[IPW()])\n+ >>> estimated_policy_value = ope.estimate_policy_values(action_dist=action_dist)\n>>> estimated_policy_value\n- {'rm': 0.005155...}\n+ {'ipw': 0.004553...}\n# estimated performance of BernoulliTS relative to the ground-truth performance of Random\n- >>> relative_policy_value_of_bernoulli_ts = estimated_policy_value['rm'] / test['reward'].mean()\n- # our OPE procedure suggests that BernoulliTS improves Random by 12.05%\n- >>> relative_policy_value_of_bernoulli_ts\n- 1.120574...\n+ >>> relative_policy_value_of_bernoulli_ts = estimated_policy_value['ipw'] / bandit_feedback['reward'].mean()\n+ # our OPE procedure suggests that BernoulliTS improves Random by 19.81%\n+ >>> print(relative_policy_value_of_bernoulli_ts)\n+ 1.198126...\n\"\"\"\n@@ -138,7 +137,7 @@ class OffPolicyEvaluation:\nreturn estimator_inputs\ndef estimate_policy_values(self, action_dist: np.ndarray) -> Dict[str, float]:\n- \"\"\"Estimate policy value of a counterfactual policy.\n+ \"\"\"Estimate policy value of a evaluation policy.\nParameters\n------------\n@@ -283,7 +282,7 @@ class OffPolicyEvaluation:\nNumber of resampling performed in the bootstrap procedure.\nrelative: bool, default: False,\n- If True, the method visualizes the estimated policy values of counterfactual policy\n+ If True, the method visualizes the estimated policy values of evaluation policy\nrelative to the ground-truth policy value of behavior policy\nfig_dir: Path, default: None\n@@ -356,7 +355,7 @@ class OffPolicyEvaluation:\nSequence of actions selected by evaluation policy at each round in offline bandit simulation.\nground_truth policy value: int\n- 
Ground_truth policy value of a counterfactual policy, i.e., :math:`V(\\\\pi)`.\n+ Ground_truth policy value of a evaluation policy, i.e., :math:`V(\\\\pi)`.\nWith Open Bandit Dataset, in general, we use an on-policy estimate of the policy value as ground-truth.\nReturns\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update docs of meta.py
641,014
01.10.2020 23:02:29
-32,400
9be6f9c973f098572218de6f1a01cf945946aa27
add default preprocessing of action_context
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -13,7 +13,7 @@ from ..utils import check_bandit_feedback_inputs\n@dataclass\nclass RegressionModel:\n- \"\"\"ML model to predict the mean reward function (:math:`E[Y | X, A]`).\n+ \"\"\"ML model to estimate the mean reward function (:math:`\\\\mu(x, a) = \\\\mathbbb{E} [Y(a) | X=x]`).\nNote\n-------\n@@ -33,6 +33,7 @@ class RegressionModel:\naction_context: array-like, shape (n_actions, dim_action_context), default=None\nContext vector characterizing each action.\n+ If not given, then one-hot encoding of the action variable is automatically used as `action_context`.\nfitting_method: str, default='normal'\nMethod to fit the regression method.\n@@ -68,13 +69,15 @@ class RegressionModel:\nself.base_model_list = [\nclone(self.base_model) for _ in np.arange(self.len_list)\n]\n+ if self.action_context is None:\n+ self.action_context = np.eye(self.n_actions, dtype=int)\ndef fit(\nself,\ncontext: np.ndarray,\naction: np.ndarray,\nreward: np.ndarray,\n- pscore: np.ndarray,\n+ pscore: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\n) -> None:\n\"\"\"Fit the regression model on given logged bandit feedback data.\n@@ -112,26 +115,24 @@ class RegressionModel:\nassert self.len_list == 1, \"position has to be set when len_list is 1\"\nposition = np.zeros_like(action)\nfor position_ in np.arange(self.len_list):\n- # create context vector to make predictions\n+ idx = position == position_\nX = self._pre_process_for_reg_model(\n- context=context[position == position_],\n- action=action[position == position_],\n+ context=context[idx],\n+ action=action[idx],\naction_context=self.action_context,\n)\n# train the base model according to the given `fitting method`\nif self.fitting_method == \"normal\":\n- self.base_model_list[position_].fit(X, reward[position == position_])\n+ self.base_model_list[position_].fit(X, reward[idx])\nelif self.fitting_method == \"iw\":\n- sample_weight = 1.0 / pscore[position == position_]\n+ sample_weight = 1.0 / pscore[idx]\nself.base_model_list[position_].fit(\n- X, reward[position == position_], sample_weight=sample_weight\n+ X, reward[idx], sample_weight=sample_weight\n)\nelif self.fitting_method == \"mrdr\":\n- sample_weight = (1.0 - pscore[position == position_]) / (\n- pscore[position == position_] ** 2\n- )\n+ sample_weight = (1.0 - pscore[idx]) / (pscore[idx] ** 2)\nself.base_model_list[position_].fit(\n- X, reward[position == position_], sample_weight=sample_weight\n+ X, reward[idx], sample_weight=sample_weight\n)\ndef predict(self, context: np.ndarray) -> np.ndarray:\n@@ -145,7 +146,7 @@ class RegressionModel:\nReturns\n-----------\nestimated_rewards_by_reg_model: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n- Estimated rewards by regression model for new data.\n+ Estimated expected rewards for new data given each item and position by the regression model.\n\"\"\"\nn_rounds_of_new_data = context.shape[0]\n@@ -153,7 +154,6 @@ class RegressionModel:\nestimated_rewards_by_reg_model = np.zeros(\n(n_rounds_of_new_data, self.n_actions, self.len_list)\n)\n- # create context vector to make predictions\nfor action_ in np.arange(self.n_actions):\nfor position_ in np.arange(self.len_list):\nX = self._pre_process_for_reg_model(\n@@ -161,7 +161,6 @@ class RegressionModel:\naction=action_ * ones_n_rounds_arr,\naction_context=self.action_context,\n)\n- # make predictions\nestimated_rewards_ = 
(\nself.base_model_list[position_].predict_proba(X)[:, 1]\nif is_classifier(self.base_model_list[position_])\n@@ -199,7 +198,4 @@ class RegressionModel:\nContext vector characterizing each action.\n\"\"\"\n- if action_context is None:\n- return context\n- else:\nreturn np.c_[context, action_context[action]]\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add default preprocessing of action_context
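With this change, leaving `action_context` unset makes the regression model fall back to a one-hot encoding of the action (`np.eye(n_actions, dtype=int)`), so the regression features are simply the context concatenated with an action indicator. A tiny illustration of the resulting feature matrix:

```python
import numpy as np

n_actions = 4
action_context = np.eye(n_actions, dtype=int)  # default used when action_context is None
action = np.array([2, 0, 3])                   # selected action per round
context = np.ones((3, 2))                      # toy (n_rounds, dim_context) contexts

X = np.c_[context, action_context[action]]     # context + one-hot action indicator
print(X)
# [[1. 1. 0. 0. 1. 0.]
#  [1. 1. 1. 0. 0. 0.]
#  [1. 1. 0. 0. 0. 1.]]
```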
641,014
01.10.2020 23:02:54
-32,400
79bb28863b5c6165b18c6e24c37d2157c771b2c1
add some benchmark estimators to the synthetic example
[ { "change_type": "MODIFY", "old_path": "examples/examples_with_synthetic/README.md", "new_path": "examples/examples_with_synthetic/README.md", "diff": "@@ -8,7 +8,8 @@ Specifically, we evaluate the estimation performances of well-known off-policy e\n## Evaluating Off-Policy Estimators\n-In the following, we evaluate the estimation performances of Direct Method (DM), Inverse Probability Weighting (IPW), Self-Normalized Inverse Probability Weighting (SNIPW), Doubly Robust (DR), Self-Normalized Doubly Robust (SNDR), and Switch Doubly Robust (Switch-DR).\n+In the following, we evaluate the estimation performances of Direct Method (DM), Inverse Probability Weighting (IPW), Self-Normalized Inverse Probability Weighting (SNIPW), Doubly Robust (DR), Self-Normalized Doubly Robust (SNDR), Switch Inverse Probability Weighting (Switch-IPW), Switch Doubly Robust (Switch-DR), and Doubly Robust with Optimistic Shrinkage (DRos).\n+For Switch-IPW, Switch-DR, and DRos, we tried some different values of hyperparameters.\n[`./evaluate_off_policy_estimators.py`](./evaluate_off_policy_estimators.py) implements the evaluation of OPE estimators using the synthetic bandit feedback data.\n@@ -45,18 +46,24 @@ python evaluate_off_policy_estimators.py\\\n--random_state 12345\n# relative estimation errors (lower is better) and their standard deviations of OPE estimators.\n-# our evaluation of OPE procedure suggests that Switch-DR performs better than the other estimators.\n-# ==============================\n+# our evaluation of OPE procedure suggests that Switch-DR(tau=100) performs better than the other estimators.\n+# Moreover, it appears that the performance of OPE estimators depend on the choice of hyperparameters.\n+# ========================================\n# random_state=12345\n-# ------------------------------\n+# ----------------------------------------\n# mean std\n# dm 0.025025 0.005871\n# ipw 0.011111 0.016634\n# snipw 0.010181 0.007922\n# dr 0.008184 0.007690\n-# sndr 0.011609 0.007727\n-# switch-dr 0.004839 0.004315\n-# ==============================\n+# sndr 0.013489 0.016228\n+# switch-ipw (tau=1) 0.394692 0.003630\n+# switch-ipw (tau=100) 0.010049 0.008941\n+# switch-dr (tau=1) 0.027602 0.005770\n+# switch-dr (tau=100) 0.004199 0.003130\n+# dr-os (lambda=1) 0.025708 0.005849\n+# dr-os (lambda=100) 0.008081 0.005060\n+# ========================================\n```\nThe above result can change with different situations.\n" }, { "change_type": "MODIFY", "old_path": "examples/examples_with_synthetic/evaluate_off_policy_estimators.py", "new_path": "examples/examples_with_synthetic/evaluate_off_policy_estimators.py", "diff": "@@ -24,6 +24,8 @@ from obp.ope import (\nDoublyRobust,\nSelfNormalizedDoublyRobust,\nSwitchDoublyRobust,\n+ SwitchInverseProbabilityWeighting,\n+ DoublyRobustWithShrinkage,\n)\n@@ -121,7 +123,14 @@ if __name__ == \"__main__\":\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(),\n+ SwitchInverseProbabilityWeighting(tau=1, estimator_name=\"switch-ipw (tau=1)\"),\n+ SwitchInverseProbabilityWeighting(\n+ tau=100, estimator_name=\"switch-ipw (tau=100)\"\n+ ),\n+ SwitchDoublyRobust(tau=1, estimator_name=\"switch-dr (tau=1)\"),\n+ SwitchDoublyRobust(tau=100, estimator_name=\"switch-dr (tau=100)\"),\n+ DoublyRobustWithShrinkage(lambda_=1, estimator_name=\"dr-os (lambda=1)\"),\n+ DoublyRobustWithShrinkage(lambda_=100, estimator_name=\"dr-os (lambda=100)\"),\n]\nstart = time.time()\n@@ -184,11 +193,11 @@ if __name__ == 
\"__main__\":\nrelative_ee[estimator_name], ddof=1\n)\nevaluation_of_ope_results_df = pd.DataFrame(evaluation_of_ope_results).T\n- print(\"=\" * 30)\n+ print(\"=\" * 40)\nprint(f\"random_state={random_state}\")\n- print(\"-\" * 30)\n+ print(\"-\" * 40)\nprint(evaluation_of_ope_results_df)\n- print(\"=\" * 30)\n+ print(\"=\" * 40)\n# save results of the evaluation of off-policy estimators in './logs' directory.\nlog_path = Path(\"./logs\")\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add some benchmark estimators to the synthetic example
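For illustration, a minimal sketch of how the estimators added in this commit can be instantiated; the class names and the `tau`, `lambda_`, and `estimator_name` arguments are taken directly from the diff above, and the hyperparameter values are just the illustrative ones used there.

```python
# Sketch only: instantiating the switching/shrinkage estimators added to the
# synthetic benchmark above, with the hyperparameter values used in the diff.
from obp.ope import (
    SwitchInverseProbabilityWeighting,
    SwitchDoublyRobust,
    DoublyRobustWithShrinkage,
)

ope_estimators = [
    # tau and lambda_ are the hyperparameters whose values are varied in the benchmark
    SwitchInverseProbabilityWeighting(tau=1, estimator_name="switch-ipw (tau=1)"),
    SwitchInverseProbabilityWeighting(tau=100, estimator_name="switch-ipw (tau=100)"),
    SwitchDoublyRobust(tau=1, estimator_name="switch-dr (tau=1)"),
    SwitchDoublyRobust(tau=100, estimator_name="switch-dr (tau=100)"),
    DoublyRobustWithShrinkage(lambda_=1, estimator_name="dr-os (lambda=1)"),
    DoublyRobustWithShrinkage(lambda_=100, estimator_name="dr-os (lambda=100)"),
]
```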
641,014
06.10.2020 00:34:15
-32,400
ffc8cf0b99017b581a4c7f87f268cfe74d2433cf
modify regression_model
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -13,9 +13,7 @@ import pandas as pd\nimport seaborn as sns\nfrom .estimators import BaseOffPolicyEstimator\n-from .regression_model import RegressionModel\nfrom ..types import BanditFeedback\n-from ..utils import check_is_fitted\nlogger = getLogger(__name__)\n@@ -28,6 +26,7 @@ class OffPolicyEvaluation:\n------\nWhen you use model dependent estimators such as Direct Method and Doubly Robust,\nyou must give action context and regression model when defining this class.\n+ Note that the RegressionModel can be pre-trained.\nParameters\n-----------\n@@ -38,9 +37,6 @@ class OffPolicyEvaluation:\nList of OPE estimators used to evaluate the policy value of evaluation policy.\nEstimators must follow the interface of `obp.ope.BaseOffPolicyEstimator`.\n- regression_model: RegressionModel, default: None\n- Regression model that predicts the mean reward function :math:`E[Y | X, A]`.\n-\nExamples\n----------\n@@ -86,39 +82,18 @@ class OffPolicyEvaluation:\nbandit_feedback: BanditFeedback\nope_estimators: List[BaseOffPolicyEstimator]\n- regression_model: Optional[RegressionModel] = None\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\nfor key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\nif key_ not in self.bandit_feedback:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\n-\n- if self.regression_model is not None:\n- if check_is_fitted(self.regression_model.base_model):\n- logger.info(\"a fitted regression model is given.\")\n- else:\n- logger.info(\n- \"the given regression model is not fitted, and thus train it here...\"\n- )\n- self.regression_model.fit(\n- context=self.bandit_feedback[\"context\"],\n- action=self.bandit_feedback[\"action\"],\n- reward=self.bandit_feedback[\"reward\"],\n- pscore=self.bandit_feedback[\"pscore\"],\n- position=self.bandit_feedback[\"position\"],\n- )\n- else:\n- logger.warning(\n- \"regression model is not given; model dependent estimators such as DM or DR cannot be used.\"\n- )\n-\nself.ope_estimators_ = dict()\nfor estimator in self.ope_estimators:\nself.ope_estimators_[estimator.estimator_name] = estimator\ndef _create_estimator_inputs(\n- self, action_dist: np.ndarray\n+ self, action_dist: np.ndarray, estimated_rewards_by_reg_model: np.ndarray\n) -> Dict[str, np.ndarray]:\n\"\"\"Create input dictionary to estimate policy value by subclasses of `BaseOffPolicyEstimator`\"\"\"\nestimator_inputs = {\n@@ -126,24 +101,27 @@ class OffPolicyEvaluation:\nfor input_ in [\"reward\", \"action\", \"position\", \"pscore\"]\n}\nestimator_inputs[\"action_dist\"] = action_dist\n- if self.regression_model is not None:\n- estimated_rewards_by_reg_model = self.regression_model.predict(\n- context=self.bandit_feedback[\"context\"]\n- )\nestimator_inputs[\n\"estimated_rewards_by_reg_model\"\n] = estimated_rewards_by_reg_model\nreturn estimator_inputs\n- def estimate_policy_values(self, action_dist: np.ndarray) -> Dict[str, float]:\n+ def estimate_policy_values(\n+ self,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ ) -> Dict[str, float]:\n\"\"\"Estimate policy value of a evaluation policy.\nParameters\n------------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of actions selected by evaluation policy\n- at each round in offline bandit simulation.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., 
probability of items being selected at each position by the evaluation policy (can be deterministic).\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by regression model.\n+ When it is not given, then model-dependent estimators such as DM and DR cannot be used.\nReturns\n----------\n@@ -153,9 +131,16 @@ class OffPolicyEvaluation:\n\"\"\"\nassert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\nassert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n+ if estimated_rewards_by_reg_model is None:\n+ logger.warning(\n+ \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n+ )\npolicy_value_dict = dict()\n- estimator_inputs = self._create_estimator_inputs(action_dist=action_dist)\n+ estimator_inputs = self._create_estimator_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\nfor estimator_name, estimator in self.ope_estimators_.items():\npolicy_value_dict[estimator_name] = estimator.estimate_policy_value(\n**estimator_inputs\n@@ -166,6 +151,7 @@ class OffPolicyEvaluation:\ndef estimate_intervals(\nself,\naction_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n@@ -174,9 +160,12 @@ class OffPolicyEvaluation:\nParameters\n------------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of actions selected by evaluation policy\n- at each round in offline bandit simulation.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by regression model.\n+ When it is not given, then model-dependent estimators such as DM and DR cannot be used.\nalpha: float, default: 0.05\nP-value.\n@@ -191,14 +180,21 @@ class OffPolicyEvaluation:\n----------\npolicy_value_interval_dict: Dict[str, Dict[str, float]]\nDictionary containing confidence intervals of policy value estimated\n- by nonparametric booststrap procedure.\n+ by nonparametric bootstrap procedure.\n\"\"\"\nassert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\nassert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n+ if estimated_rewards_by_reg_model is None:\n+ logger.warning(\n+ \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n+ )\npolicy_value_interval_dict = dict()\n- estimator_inputs = self._create_estimator_inputs(action_dist=action_dist)\n+ estimator_inputs = self._create_estimator_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\nfor estimator_name, estimator in self.ope_estimators_.items():\npolicy_value_interval_dict[estimator_name] = estimator.estimate_interval(\n**estimator_inputs,\n@@ -212,6 +208,7 @@ class OffPolicyEvaluation:\ndef summarize_off_policy_estimates(\nself,\naction_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 
100,\nrandom_state: Optional[int] = None,\n@@ -221,9 +218,12 @@ class OffPolicyEvaluation:\nParameters\n------------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of actions selected by evaluation policy\n- at each round in offline bandit simulation.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by regression model.\n+ When it is not given, then model-dependent estimators such as DM and DR cannot be used.\nalpha: float, default: 0.05\nP-value.\n@@ -244,12 +244,16 @@ class OffPolicyEvaluation:\nassert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\npolicy_value_df = pd.DataFrame(\n- self.estimate_policy_values(action_dist=action_dist),\n+ self.estimate_policy_values(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ),\nindex=[\"estimated_policy_value\"],\n)\npolicy_value_interval_df = pd.DataFrame(\nself.estimate_intervals(\naction_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -261,6 +265,7 @@ class OffPolicyEvaluation:\ndef visualize_off_policy_estimates(\nself,\naction_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nrelative: bool = False,\nn_bootstrap_samples: int = 100,\n@@ -271,9 +276,12 @@ class OffPolicyEvaluation:\nParameters\n----------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of actions selected by evaluation policy\n- at each round in offline bandit simulation.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by regression model.\n+ When it is not given, then model-dependent estimators such as DM and DR cannot be used.\nalpha: float, default: 0.05\nP-value.\n@@ -300,9 +308,16 @@ class OffPolicyEvaluation:\nassert isinstance(fig_dir, Path), \"fig_dir must be a Path\"\nif fig_name is not None:\nassert isinstance(fig_name, str), \"fig_dir must be a string\"\n+ if estimated_rewards_by_reg_model is None:\n+ logger.warning(\n+ \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n+ )\nestimated_round_rewards_dict = dict()\n- estimator_inputs = self._create_estimator_inputs(action_dist=action_dist)\n+ estimator_inputs = self._create_estimator_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_round_rewards_dict[\nestimator_name\n@@ -336,7 +351,10 @@ class OffPolicyEvaluation:\nfig.savefig(str(fig_dir / fig_name))\ndef evaluate_performance_of_estimators(\n- self, action_dist: np.ndarray, ground_truth_policy_value: float\n+ self,\n+ ground_truth_policy_value: float,\n+ action_dist: np.ndarray,\n+ 
estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n) -> Dict[str, float]:\n\"\"\"Evaluate estimation accuracies of off-policy estimators by relative estimation error.\n@@ -351,13 +369,17 @@ class OffPolicyEvaluation:\nParameters\n----------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of actions selected by evaluation policy at each round in offline bandit simulation.\n-\n- ground_truth policy value: int\n+ ground_truth policy value: float\nGround_truth policy value of a evaluation policy, i.e., :math:`V(\\\\pi)`.\nWith Open Bandit Dataset, in general, we use an on-policy estimate of the policy value as ground-truth.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by regression model.\n+ When it is not given, then model-dependent estimators such as DM and DR cannot be used.\n+\nReturns\n----------\nrelative_estimation_error_dict: Dict[str, float]\n@@ -369,25 +391,44 @@ class OffPolicyEvaluation:\nassert isinstance(\nground_truth_policy_value, float\n), \"ground_truth_policy_value must be a float\"\n+ if estimated_rewards_by_reg_model is None:\n+ logger.warning(\n+ \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n+ )\nrelative_estimation_error_dict = dict()\n- estimator_inputs = self._create_estimator_inputs(action_dist=action_dist)\n+ estimator_inputs = self._create_estimator_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_policy_value = estimator.estimate_policy_value(**estimator_inputs)\nrelative_estimation_error_dict[estimator_name] = np.abs(\n(estimated_policy_value - ground_truth_policy_value)\n/ ground_truth_policy_value\n)\n-\nreturn relative_estimation_error_dict\n- def summarize_estimators_comparison(self, action_dist: np.ndarray) -> pd.DataFrame:\n+ def summarize_estimators_comparison(\n+ self,\n+ ground_truth_policy_value: float,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ ) -> pd.DataFrame:\n\"\"\"Summarize performance comparison of given off-policy estimators.\nParameters\n----------\n- selected_actions: array-like, shape (n_rounds, len_list)\n- Sequence of actions selected by evaluation policy at each round in offline bandit simulation.\n+ ground_truth policy value: float\n+ Ground_truth policy value of a evaluation policy, i.e., :math:`V(\\\\pi)`.\n+ With Open Bandit Dataset, in general, we use an on-policy estimate of the policy value as ground-truth.\n+\n+ action_dist: array-like shape (n_rounds, n_actions, len_list)\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by regression model.\n+ When it is not given, then model-dependent estimators such as DM and DR cannot be used.\nReturns\n----------\n@@ -399,8 +440,11 @@ class OffPolicyEvaluation:\nassert action_dist.ndim == 
3, \"action_dist must be 3-dimensional\"\nrelative_estimation_error_df = pd.DataFrame(\n- self.evaluate_performance_of_estimators(action_dist=action_dist),\n+ self.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ),\nindex=[\"relative_estimation_error\"],\n)\n-\nreturn relative_estimation_error_df.T\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -7,13 +7,14 @@ from typing import Optional\nimport numpy as np\nfrom sklearn.base import BaseEstimator, clone, is_classifier\n+from sklearn.model_selection import StratifiedKFold\nfrom ..utils import check_bandit_feedback_inputs\n@dataclass\n-class RegressionModel:\n- \"\"\"ML model to estimate the mean reward function (:math:`\\\\mu(x, a) = \\\\mathbbb{E} [Y(a) | X=x]`).\n+class RegressionModel(BaseEstimator):\n+ \"\"\"Machine Learning model to estimate the mean reward function (:math:`\\\\mu(x, a) = \\\\mathbbb{E} [Y(a) | X=x]`).\nNote\n-------\n@@ -22,7 +23,7 @@ class RegressionModel:\nParameters\n------------\nbase_model: BaseEstimator\n- Model class to be used to predict the mean reward function.\n+ Model class to be used to estimate the mean reward function.\nn_actions: int\nNumber of actions.\n@@ -45,6 +46,12 @@ class RegressionModel:\nMehrdad Farajtabar, Yinlam Chow, and Mohammad Ghavamzadeh.\n\"More Robust Doubly Robust Off-policy Evaluation.\", 2018.\n+ Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.\n+ \"Doubly Robust Off-Policy Evaluation with Shrinkage.\", 2020.\n+\n+ Yusuke Narita, Shota Yasui, and Kohei Yata.\n+ \"Off-policy Bandit and Reinforcement Learning.\", 2020.\n+\n\"\"\"\nbase_model: BaseEstimator\n@@ -79,6 +86,7 @@ class RegressionModel:\nreward: np.ndarray,\npscore: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\n+ action_dist: Optional[np.ndarray] = None,\n) -> None:\n\"\"\"Fit the regression model on given logged bandit feedback data.\n@@ -102,6 +110,10 @@ class RegressionModel:\nIf None is given, a learner assumes that there is only one position.\nWhen `len_list` > 1, position has to be set.\n+ action_dist: array-like shape (n_rounds, n_actions, len_list), default=None\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+ When either of 'iw' or 'mrdr' is used as the 'fitting_method' argument, then `action_dist` must be given.\n+\n\"\"\"\ncheck_bandit_feedback_inputs(\ncontext=context,\n@@ -111,9 +123,17 @@ class RegressionModel:\nposition=position,\naction_context=self.action_context,\n)\n- if position is None:\n- assert self.len_list == 1, \"position has to be set when len_list is 1\"\n+ if self.len_list == 1:\nposition = np.zeros_like(action)\n+ else:\n+ assert (\n+ position is not None\n+ ), \"position has to be set when len_list is larger than 1\"\n+ if self.fitting_method in [\"iw\", \"mrdr\"]:\n+ assert (\n+ action_dist is not None\n+ ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then `action_dist` must be given\"\n+ n_data = context.shape[0]\nfor position_ in np.arange(self.len_list):\nidx = position == position_\nX = self._pre_process_for_reg_model(\n@@ -124,13 +144,21 @@ class RegressionModel:\n# train the base model according to the given `fitting method`\nif self.fitting_method == \"normal\":\nself.base_model_list[position_].fit(X, 
reward[idx])\n- elif self.fitting_method == \"iw\":\n- sample_weight = 1.0 / pscore[idx]\n+ else:\n+ action_dist_at_position = action_dist[\n+ np.arange(n_data), action, position_ * np.ones(n_data, dtype=int)\n+ ][idx]\n+ if self.fitting_method == \"iw\":\n+ sample_weight = action_dist_at_position / pscore[idx]\nself.base_model_list[position_].fit(\nX, reward[idx], sample_weight=sample_weight\n)\nelif self.fitting_method == \"mrdr\":\n- sample_weight = (1.0 - pscore[idx]) / (pscore[idx] ** 2)\n+ sample_weight = (\n+ action_dist_at_position\n+ * (1.0 - pscore[idx])\n+ / (pscore[idx] ** 2)\n+ )\nself.base_model_list[position_].fit(\nX, reward[idx], sample_weight=sample_weight\n)\n@@ -173,11 +201,97 @@ class RegressionModel:\n] = estimated_rewards_\nreturn estimated_rewards_by_reg_model\n- def _pre_process_for_reg_model(\n+ def fit_predict(\nself,\ncontext: np.ndarray,\naction: np.ndarray,\n- action_context: Optional[np.ndarray] = None,\n+ reward: np.ndarray,\n+ pscore: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n+ action_dist: Optional[np.ndarray] = None,\n+ n_folds: int = 1,\n+ ) -> None:\n+ \"\"\"Fit the regression model on given logged bandit feedback data\n+ and then predict the mean reward function of the same data.\n+\n+ Note\n+ ------\n+ When `n_folds` is larger than 1, then the cross-fitting procedure is applied.\n+ See the reference for the details of the cross-fitting.\n+\n+ Parameters\n+ ----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors in the given training logged bandit feedback.\n+\n+ action: array-like, shape (n_rounds,)\n+ Selected actions by behavior policy in the given training logged bandit feedback.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Observed rewards in the given training logged bandit feedback.\n+\n+ pscore: Optional[np.ndarray], default: None\n+ Propensity scores, the probability of selecting each action by behavior policy,\n+ in the given training logged bandit feedback.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given training logged bandit feedback.\n+ If None is given, a learner assumes that there is only one position.\n+ When `len_list` > 1, position has to be set.\n+\n+ action_dist: array-like shape (n_rounds, n_actions, len_list), default=None\n+ Distribution over actions, i.e., probability of items being selected at each position by the evaluation policy (can be deterministic).\n+ When either of 'iw' or 'mrdr' is used as the 'fitting_method' argument, then `action_dist` must be given.\n+\n+ n_folds: int, default=1\n+ Number of folds in the cross-fitting procedure.\n+ When 1 is given, then the regression model is trained on the whole logged bandit feedback data.\n+\n+ Returns\n+ -----------\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\n+ Estimated expected rewards for the given logged bandit feedback at each item and position by the regression model.\n+\n+ \"\"\"\n+ assert n_folds > 1 and isinstance(\n+ n_folds, int\n+ ), f\"n_folds must be an integer larger than 1, but {n_folds} is given\"\n+\n+ if n_folds == 1:\n+ self.fit(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_dist=action_dist,\n+ )\n+ return self.predict(context=context)\n+ else:\n+ estimated_rewards_by_reg_model = np.zeros(\n+ (context.shape[0], self.n_actions, self.len_list)\n+ )\n+ skf = StratifiedKFold(n_splits=n_folds)\n+ skf.get_n_splits(context, reward)\n+ for train_idx, 
test_idx in skf.split(context, reward):\n+ action_dist_tr = (\n+ action_dist[train_idx] if action_dist is not None else action_dist\n+ )\n+ self.fit(\n+ context=context[train_idx],\n+ action=action[train_idx],\n+ reward=reward[train_idx],\n+ pscore=pscore[train_idx],\n+ position=position[train_idx],\n+ action_dist=action_dist_tr,\n+ )\n+ estimated_rewards_by_reg_model[test_idx, :, :] = self.predict(\n+ context=context[test_idx]\n+ )\n+ return estimated_rewards_by_reg_model\n+\n+ def _pre_process_for_reg_model(\n+ self, context: np.ndarray, action: np.ndarray, action_context: np.ndarray,\n) -> np.ndarray:\n\"\"\"Preprocess feature vectors to train a give regression model.\n@@ -194,7 +308,7 @@ class RegressionModel:\naction: array-like, shape (n_rounds,)\nSelected actions by behavior policy in the given training logged bandit feedback.\n- action_context: array-like, shape shape (n_actions, dim_action_context), default=None\n+ action_context: array-like, shape shape (n_actions, dim_action_context)\nContext vector characterizing each action.\n\"\"\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
modify regression_model
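To make the refactored interface concrete, here is a small sketch of the workflow this commit introduces: `RegressionModel` is fitted on its own and its predictions are handed to `OffPolicyEvaluation` through the new `estimated_rewards_by_reg_model` argument. The toy logged data (uniform-random logging policy, single slot) is fabricated for illustration; only the class and argument names come from the diffs above.

```python
# Sketch of the decoupled workflow after this commit, assuming a toy
# logged bandit feedback; the arrays below are placeholders.
import numpy as np
from sklearn.linear_model import LogisticRegression
from obp.ope import (
    OffPolicyEvaluation,
    RegressionModel,
    InverseProbabilityWeighting,
    DirectMethod,
    DoublyRobust,
)

rng = np.random.default_rng(12345)
n_rounds, n_actions, dim_context = 1000, 5, 4
bandit_feedback = {
    "n_rounds": n_rounds,
    "context": rng.normal(size=(n_rounds, dim_context)),
    "action": rng.integers(n_actions, size=n_rounds),
    "reward": rng.binomial(1, 0.3, size=n_rounds),
    "pscore": np.full(n_rounds, 1.0 / n_actions),  # uniform-random behavior policy
    "position": np.zeros(n_rounds, dtype=int),     # len_list = 1
    "action_context": np.eye(n_actions),
}
# an arbitrary stochastic evaluation policy, shape (n_rounds, n_actions, len_list)
action_dist = np.tile(rng.dirichlet(np.ones(n_actions)), (n_rounds, 1))[:, :, np.newaxis]

# 1. fit the regression model separately and predict the mean reward function
regression_model = RegressionModel(
    n_actions=n_actions,
    len_list=1,
    action_context=bandit_feedback["action_context"],
    base_model=LogisticRegression(max_iter=1000),
)
regression_model.fit(
    context=bandit_feedback["context"],
    action=bandit_feedback["action"],
    reward=bandit_feedback["reward"],
    position=bandit_feedback["position"],
)
estimated_rewards_by_reg_model = regression_model.predict(
    context=bandit_feedback["context"]
)

# 2. pass the estimated rewards to OPE explicitly (no regression_model argument anymore)
ope = OffPolicyEvaluation(
    bandit_feedback=bandit_feedback,
    ope_estimators=[InverseProbabilityWeighting(), DirectMethod(), DoublyRobust()],
)
estimated_policy_values = ope.estimate_policy_values(
    action_dist=action_dist,
    estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
print(estimated_policy_values)  # dict of estimator_name -> estimated policy value
```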
641,014
06.10.2020 01:20:37
-32,400
018d6a9e4ebfb9de4eb8f0bbb3a182962611f6ba
adjust to the new implementation of regression model
[ { "change_type": "MODIFY", "old_path": "benchmark/ope/benchmark_off_policy_estimators.py", "new_path": "benchmark/ope/benchmark_off_policy_estimators.py", "diff": "@@ -12,11 +12,29 @@ from obp.ope import (\nOffPolicyEvaluation,\nInverseProbabilityWeighting,\nSelfNormalizedInverseProbabilityWeighting,\n- SwitchDoublyRobust,\nDirectMethod,\nDoublyRobust,\n+ SelfNormalizedDoublyRobust,\n+ SwitchDoublyRobust,\n+ SwitchInverseProbabilityWeighting,\n+ DoublyRobustWithShrinkage,\n)\n+# compared OPE estimators\n+ope_estimators = [\n+ DirectMethod(),\n+ InverseProbabilityWeighting(),\n+ SelfNormalizedInverseProbabilityWeighting(),\n+ DoublyRobust(),\n+ SelfNormalizedDoublyRobust(),\n+ SwitchInverseProbabilityWeighting(tau=1, estimator_name=\"switch-ipw (tau=1)\"),\n+ SwitchInverseProbabilityWeighting(tau=100, estimator_name=\"switch-ipw (tau=100)\"),\n+ SwitchDoublyRobust(tau=1, estimator_name=\"switch-dr (tau=1)\"),\n+ SwitchDoublyRobust(tau=100, estimator_name=\"switch-dr (tau=100)\"),\n+ DoublyRobustWithShrinkage(lambda_=1, estimator_name=\"dr-os (lambda=1)\"),\n+ DoublyRobustWithShrinkage(lambda_=100, estimator_name=\"dr-os (lambda=100)\"),\n+]\n+\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\nparser.add_argument(\n@@ -97,17 +115,6 @@ if __name__ == \"__main__\":\nobd = OpenBanditDataset(\nbehavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n)\n- # compared OPE estimators\n- ope_estimators = [\n- DirectMethod(),\n- InverseProbabilityWeighting(),\n- SelfNormalizedInverseProbabilityWeighting(),\n- DoublyRobust(),\n- SwitchDoublyRobust(tau=0.1, estimator_name=\"switch-dr(0.1)\"),\n- SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr(1.0)\"),\n- SwitchDoublyRobust(tau=10, estimator_name=\"switch-dr(10)\"),\n- SwitchDoublyRobust(tau=100, estimator_name=\"switch-dr(100)\"),\n- ]\n# ground-truth policy value of a evaluation policy\n# , which is estimated with factual (observed) rewards (on-policy estimation)\nground_truth_policy_value = OpenBanditDataset.calc_on_policy_policy_value_estimate(\n@@ -128,7 +135,7 @@ if __name__ == \"__main__\":\nreg_model = pickle.load(f)\nwith open(reg_model_path / f\"is_for_reg_model_{b}.pkl\", \"rb\") as f:\nis_for_reg_model = pickle.load(f)\n- # sample bootstrap from batch logged bandit feedback\n+ # sample bootstrap samples from batch logged bandit feedback\nboot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split, random_state=b\n)\n@@ -154,16 +161,18 @@ if __name__ == \"__main__\":\naction_dist = policy.compute_batch_action_dist(\nn_sim=100000, n_rounds=boot_bandit_feedback[\"n_rounds\"]\n)\n+ # estimate the mean reward function using the pre-trained reg_model\n+ estimated_rewards_by_reg_model = reg_model.predict(\n+ context=boot_bandit_feedback[\"context\"],\n+ )\n# evaluate the estimation performance of OPE estimators\nope = OffPolicyEvaluation(\n- bandit_feedback=boot_bandit_feedback,\n- regression_model=reg_model,\n- ope_estimators=ope_estimators,\n+ bandit_feedback=boot_bandit_feedback, ope_estimators=ope_estimators,\n)\n- estimated_policy_values = ope.estimate_policy_values(action_dist=action_dist,)\nrelative_estimation_errors = ope.evaluate_performance_of_estimators(\n- action_dist=action_dist,\nground_truth_policy_value=ground_truth_policy_value,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\n# store relative estimation errors of OPE estimators at 
each bootstrap\nfor (\n@@ -174,7 +183,7 @@ if __name__ == \"__main__\":\nprint(f\"{b+1}th iteration: {np.round((time.time() - start) / 60, 2)}min\")\n- # estimate confidence intervals of relative estimation by nonparametric bootstrap method\n+ # estimate means and standard deviations of relative estimation by nonparametric bootstrap method\nevaluation_of_ope_results = {est.estimator_name: dict() for est in ope_estimators}\nfor estimator_name in evaluation_of_ope_results.keys():\nevaluation_of_ope_results[estimator_name][\"mean\"] = relative_ee[\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/train_regression_model.py", "new_path": "benchmark/ope/train_regression_model.py", "diff": "@@ -13,9 +13,8 @@ from sklearn.metrics import log_loss, roc_auc_score\nfrom obp.dataset import OpenBanditDataset\nfrom obp.ope import RegressionModel\n-from obp.utils import estimate_confidence_interval_by_bootstrap\n-\n+# hyperparameter settings for the base ML model in regression model\nwith open(\"./conf/hyperparams.yaml\", \"rb\") as f:\nhyperparams = yaml.safe_load(f)\n@@ -95,27 +94,26 @@ if __name__ == \"__main__\":\nmetrics[i]: np.zeros(n_boot_samples) for i in np.arange(len(metrics))\n}\nfor b in np.arange(n_boot_samples):\n- # sample bootstrap from batch logged bandit feedback\n+ # sample bootstrap samples from batch logged bandit feedback\nboot_bandit_feedback = obd.sample_bootstrap_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split, random_state=b\n)\n- # split data into two folds (data for training reg_model and for doing ope)\n+ # split data into two folds (data for training reg_model and for ope)\nis_for_reg_model = np.random.binomial(\nn=1, p=0.3, size=boot_bandit_feedback[\"n_rounds\"]\n).astype(bool)\n- # define a regression model\n+ # define regression model\nreg_model = RegressionModel(\nn_actions=obd.n_actions,\nlen_list=obd.len_list,\naction_context=boot_bandit_feedback[\"action_context\"],\nbase_model=base_model_dict[base_model](**hyperparams[base_model]),\n)\n- # train a regression model on logged bandit feedback data\n+ # train regression model on logged bandit feedback data\nreg_model.fit(\ncontext=boot_bandit_feedback[\"context\"][is_for_reg_model],\naction=boot_bandit_feedback[\"action\"][is_for_reg_model],\nreward=boot_bandit_feedback[\"reward\"][is_for_reg_model],\n- pscore=boot_bandit_feedback[\"pscore\"][is_for_reg_model],\nposition=boot_bandit_feedback[\"position\"][is_for_reg_model],\n)\n# evaluate the estimation performance of the regression model by AUC and RCE\n@@ -162,15 +160,16 @@ if __name__ == \"__main__\":\nf\"{np.round((time.time() - start_time) / 60, 1)}min\",\n)\n- # estimate confidence intervals of the performances of the regression model\n- performance_of_reg_model_with_ci = {}\n- for metric in metrics:\n- performance_of_reg_model_with_ci[\n+ # estimate means and standard deviations of the performances of the regression model\n+ performance_of_reg_model_ = {metric: dict() for metric in metrics}\n+ for metric in performance_of_reg_model_.keys():\n+ performance_of_reg_model_[metric][\"mean\"] = performance_of_reg_model[\nmetric\n- ] = estimate_confidence_interval_by_bootstrap(\n- samples=performance_of_reg_model[metric], random_state=random_state\n+ ].mean()\n+ performance_of_reg_model_[metric][\"std\"] = np.std(\n+ performance_of_reg_model[metric], ddof=1\n)\n- performance_of_reg_model_df = pd.DataFrame(performance_of_reg_model_with_ci).T\n+ performance_of_reg_model_df = pd.DataFrame(performance_of_reg_model_).T\nprint(\"=\" * 
50)\nprint(f\"random_state={random_state}\")\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
adjust to the new implementation of regression model
641,014
23.10.2020 06:57:12
-32,400
2e089d876225be53597daf10203cc89ea200680b
fix some bugs in cross-fitting
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -7,7 +7,7 @@ from typing import Optional\nimport numpy as np\nfrom sklearn.base import BaseEstimator, clone, is_classifier\n-from sklearn.model_selection import StratifiedKFold\n+from sklearn.model_selection import KFold\nfrom ..utils import check_bandit_feedback_inputs\n@@ -132,7 +132,10 @@ class RegressionModel(BaseEstimator):\nif self.fitting_method in [\"iw\", \"mrdr\"]:\nassert (\naction_dist is not None\n- ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then `action_dist` must be given\"\n+ ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then action_dist must be given\"\n+ assert (\n+ pscore is not None\n+ ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then pscore must be given\"\nn_data = context.shape[0]\nfor position_ in np.arange(self.len_list):\nidx = position == position_\n@@ -252,9 +255,22 @@ class RegressionModel(BaseEstimator):\nEstimated expected rewards for the given logged bandit feedback at each item and position by the regression model.\n\"\"\"\n- assert n_folds > 1 and isinstance(\n+ assert n_folds > 0 and isinstance(\nn_folds, int\n- ), f\"n_folds must be an integer larger than 1, but {n_folds} is given\"\n+ ), f\"n_folds must be a positive integer, but {n_folds} is given\"\n+ if self.len_list == 1:\n+ position = np.zeros_like(action)\n+ else:\n+ assert (\n+ position is not None\n+ ), \"position has to be set when len_list is larger than 1\"\n+ if self.fitting_method in [\"iw\", \"mrdr\"]:\n+ assert (\n+ action_dist is not None\n+ ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then action_dist must be given\"\n+ assert (\n+ pscore is not None\n+ ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then pscore must be given\"\nif n_folds == 1:\nself.fit(\n@@ -270,8 +286,8 @@ class RegressionModel(BaseEstimator):\nestimated_rewards_by_reg_model = np.zeros(\n(context.shape[0], self.n_actions, self.len_list)\n)\n- skf = StratifiedKFold(n_splits=n_folds)\n- skf.get_n_splits(context, reward)\n+ skf = KFold(n_splits=n_folds, shuffle=True)\n+ skf.get_n_splits(context)\nfor train_idx, test_idx in skf.split(context, reward):\naction_dist_tr = (\naction_dist[train_idx] if action_dist is not None else action_dist\n" }, { "change_type": "MODIFY", "old_path": "obp/version.py", "new_path": "obp/version.py", "diff": "-__version__ = \"0.3.0\"\n+__version__ = \"0.3.1\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix some bugs in cross-fitting
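With these fixes in place, cross-fitting can be sketched roughly as below. The toy arrays are again fabricated; the `fit_predict` signature and the `n_folds` behaviour are read off the diffs. Note that `pscore` is indexed inside the k-fold loop in this version, so it is passed here even though the default "normal" fitting method does not strictly need it.

```python
# Sketch of cross-fitting with RegressionModel.fit_predict after the fixes above;
# the toy data is fabricated for illustration only.
import numpy as np
from sklearn.linear_model import LogisticRegression
from obp.ope import RegressionModel

rng = np.random.default_rng(0)
n_rounds, n_actions, dim_context = 1000, 5, 4
context = rng.normal(size=(n_rounds, dim_context))
action = rng.integers(n_actions, size=n_rounds)
reward = rng.binomial(1, 0.3, size=n_rounds)
pscore = np.full(n_rounds, 1.0 / n_actions)
position = np.zeros(n_rounds, dtype=int)  # len_list = 1

regression_model = RegressionModel(
    n_actions=n_actions,
    len_list=1,
    action_context=np.eye(n_actions),
    base_model=LogisticRegression(max_iter=1000),
)
# n_folds > 1 triggers the KFold-based cross-fitting introduced by this change
estimated_rewards_by_reg_model = regression_model.fit_predict(
    context=context,
    action=action,
    reward=reward,
    pscore=pscore,     # sliced inside the fold loop, so it must be supplied
    position=position,
    n_folds=2,
)
print(estimated_rewards_by_reg_model.shape)  # (n_rounds, n_actions, 1)
```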
641,014
31.10.2020 01:19:44
-32,400
1a3c7d7ea9cd13ed9ef24ae8e0eee59f2858d5c9
fix typos in quickstarts
[ { "change_type": "MODIFY", "old_path": "examples/quickstart/quickstart.ipynb", "new_path": "examples/quickstart/quickstart.ipynb", "diff": "\"source\": [\n\"## (1) Data Loading and Preprocessing\\n\",\n\"\\n\",\n- \"We prepare easy-to-use data loader for Open Bandit Dataset, **OpenBanditDataset** class in dataset module. <br>\\n\",\n+ \"We prepare easy-to-use data loader for Open Bandit Dataset, `OpenBanditDataset` class in the dataset module. \\n\",\n+ \"\\n\",\n\"It takes behavior policy ('bts' or 'random') and campaign ('all', 'men', or 'women') as inputs and provides dataset preprocessing.\"\n]\n},\n\"data_path = Path('.').resolve().parents[1] / 'obd'\\n\",\n\"# load and preprocess raw data in \\\"All\\\" campaign collected by the Random policy (behavior policy here)\\n\",\n\"dataset = OpenBanditDataset(behavior_policy='random', campaign='all', data_path=data_path)\\n\",\n- \"# obtain logged bandit feedback generated by behavior polciy\\n\",\n+ \"# obtain logged bandit feedback generated by behavior policy\\n\",\n\"bandit_feedback = dataset.obtain_batch_bandit_feedback()\\n\",\n\"\\n\",\n\"# `bandit_feedback` is a dictionary storing logged bandit feedback\\n\",\n\"After preparing the dataset, we now run **offline bandit simulation**.\\n\",\n\"\\n\",\n\"We use **Bernoulli TS** impelemted in the policy module as an evaluation policy. \\n\",\n- \"By activating the `is_zozotown_prior` argument of BernoulliTS, we can replicate BernoulliTS used in ZOZOTOWN production.\\n\",\n+ \"By activating its `is_zozotown_prior` argument, we can replicate BernoulliTS used in ZOZOTOWN production.\\n\",\n\"(When `is_zozotown_prior=False`, non-informative prior distribution is used.)\"\n]\n},\n\" campaign=\\\"all\\\",\\n\",\n\" random_state=12345\\n\",\n\")\\n\",\n- \"# compute the distribution over actions by the evaluation policy using Monte Carlo simulation\\n\",\n+ \"# compute the action choice probabilities by the evaluation policy using Monte Carlo simulation\\n\",\n\"action_dist = evaluation_policy.compute_batch_action_dist(\\n\",\n\" n_sim=100000, n_rounds=bandit_feedback[\\\"n_rounds\\\"]\\n\",\n\")\\n\",\n\"\\n\",\n\"# action_dist is an array of shape (n_rounds, n_actions, len_list) \\n\",\n- \"# representing the distribution over actions by the counterfactual policy\\n\",\n+ \"# representing the distribution over actions by the evaluation policy\\n\",\n\"action_dist\"\n]\n},\n\"\\n\",\n\"Our final step is **off-policy evaluation**, which attempts to estimate the performance of new decision making policies using log data generated by behavior, past policies. \\n\",\n\"\\n\",\n- \"We use the *InverseProbabilityWeighting (IPW)*, *DirectMethod (DM)*, and *Doubly Robust (DR)* estimators as OPE estimators and estiamte the performance of Bernoulli TS using the logged bandit feedback. Finally, we summarize and visualize the OPE results.\"\n+ \"We use the *InverseProbabilityWeighting (IPW)*, *DirectMethod (DM)*, and *Doubly Robust (DR)* estimatorsand estiamte the performance of Bernoulli TS using the logged bandit feedback. 
\\n\",\n+ \"\\n\",\n+ \"Finally, we summarize and visualize the OPE results.\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# estimate the mean reward function by using ML model (Logistic Regression here)\\n\",\n- \"# the estimated reward is used by model-dependent estimators such as DM and DR\\n\",\n+ \"# the estimated rewards are used by model-dependent estimators such as DM and DR\\n\",\n\"regression_model = RegressionModel(\\n\",\n\" n_actions=dataset.n_actions,\\n\",\n\" len_list=dataset.len_list,\\n\",\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Note that the demonstration of OPE here is with the small size example version of our dataset, \\n\",\n- \"and please use the full size version to produce more reasonable results\"\n+ \"Note that the OPE demonstration here is with the small size example version of our dataset. \\n\",\n+ \"\\n\",\n+ \"Please use its full size version (https://research.zozo.com/data.html) to produce more reasonable results.\"\n]\n},\n{\n" }, { "change_type": "MODIFY", "old_path": "examples/quickstart/quickstart_synthetic.ipynb", "new_path": "examples/quickstart/quickstart_synthetic.ipynb", "diff": "\"source\": [\n\"# Quickstart: Use Cases and Examples with Synthetic Data\\n\",\n\"---\\n\",\n- \"This notebook provides an example of conducting OPE of several evaluation policies using OPE estimators and synthetic logged bandit feedback data.\\n\",\n+ \"This notebook provides an example of conducting OPE of several different evaluation policies and synthetic logged bandit feedback data.\\n\",\n\"\\n\",\n\"Our example with synthetic data here contains the follwoing four major steps:\\n\",\n\"- (1) Synthetic Data Generation\\n\",\n\"## (2) Off-Policy Learning\\n\",\n\"After generating the synthetic data, we now train some candidate evaluation policies using the training set. <br>\\n\",\n\"\\n\",\n- \"We use *IPW Learner* implemented in the policy module to train evaluation policies. We also use *RandomForestClassifier* and *LogisticRegression* implemented in scikit-learn for the base ML model.\"\n+ \"We use *IPW Learner* implemented in the policy module to train evaluation policies. 
We also use *RandomForestClassifier* and *LogisticRegression* implemented in scikit-learn for base machine learning methods.\"\n]\n},\n{\n\" reward=bandit_feedback_train[\\\"reward\\\"],\\n\",\n\" pscore=bandit_feedback_train[\\\"pscore\\\"]\\n\",\n\")\\n\",\n- \"# predict the action decisions for the test set of the synthetic logged bandit feedback\\n\",\n+ \"# make the action decisions for the test set of the synthetic logged bandit feedback\\n\",\n\"action_dist_a = evaluation_policy_a.predict_proba(\\n\",\n\" context=bandit_feedback_test[\\\"context\\\"]\\n\",\n\")\"\n\" reward=bandit_feedback_train[\\\"reward\\\"],\\n\",\n\" pscore=bandit_feedback_train[\\\"pscore\\\"]\\n\",\n\")\\n\",\n- \"# predict the action decisions for the test set of the synthetic logged bandit feedback\\n\",\n+ \"# make the action decisions for the test set of the synthetic logged bandit feedback\\n\",\n\"action_dist_b = evaluation_policy_b.predict_proba(\\n\",\n\" context=bandit_feedback_test[\\\"context\\\"]\\n\",\n\")\"\n\"metadata\": {},\n\"source\": [\n\"## (3) Off-Policy Evaluation (OPE)\\n\",\n- \"Off-policy evaluation attempts to estimate the performance of evaluation policies using their action distributions.\\n\",\n+ \"OPE attempts to estimate the performance of evaluation policies using their action choices.\\n\",\n\"\\n\",\n- \"Here, we use the *InverseProbabilityWeighting (IPW)*, *DirectMethod (DM)*, and *Doubly Robust (DR)* estimators as OPE estimators and estiamte the performances of the evaluation policies. \\n\",\n- \"Finally, we summarize and visualize the results of OPE and evaluate the estimation accuracy of OPE.\"\n+ \"Here, we use the *InverseProbabilityWeighting (IPW)*, *DirectMethod (DM)*, and *Doubly Robust (DR)* estimators and estimate the performances of the pre-trained evaluation policies. 
\\n\",\n+ \"Finally, we summarize and visualize the results of OPE and evaluate their estimation accuracies.\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# estimate the mean reward function by using ML model (Logistic Regression here)\\n\",\n- \"# the estimated reward is used by model-dependent estimators such as DM and DR\\n\",\n+ \"# the estimated rewards are used by model-dependent estimators such as DM and DR\\n\",\n\"regression_model = RegressionModel(\\n\",\n\" n_actions=dataset.n_actions,\\n\",\n\" len_list=dataset.len_list,\\n\",\n},\n\"outputs\": [],\n\"source\": [\n- \"# estimate the policy value of the evaluation policies based on their distributions over actions\\n\",\n+ \"# estimate the policy value of the evaluation policies based on their action choice probabilities\\n\",\n\"# it is possible to set multiple OPE estimators to the `ope_estimators` argument\\n\",\n\"ope = OffPolicyEvaluation(\\n\",\n\" bandit_feedback=bandit_feedback_test,\\n\",\n}\n],\n\"source\": [\n- \"# estimated confidence intervals of policy value of IPWLearner with Logistic Regression by OPE estimators\\n\",\n+ \"# estimate the policy value of IPWLearner with Logistic Regression\\n\",\n\"estimated_policy_value_a, estimated_interval_a = ope.summarize_off_policy_estimates(\\n\",\n\" action_dist=action_dist_a,\\n\",\n\" estimated_rewards_by_reg_model=estimated_rewards_by_reg_model\\n\",\n}\n],\n\"source\": [\n- \"# estimated confidence intervals of policy value of IPWLearner with Random Forest by OPE estimators\\n\",\n+ \"# estimate the policy value of IPWLearner with Random Forest by OPE estimators\\n\",\n\"estimated_policy_value_b, estimated_interval_b = ope.summarize_off_policy_estimates(\\n\",\n\" action_dist=action_dist_b,\\n\",\n\" estimated_rewards_by_reg_model=estimated_rewards_by_reg_model\\n\",\n}\n],\n\"source\": [\n- \"# estimated confidence intervals of policy value of Uniform Random by OPE estimators\\n\",\n+ \"# estimate the policy value of Uniform Random\\n\",\n\"estimated_policy_value_c, estimated_interval_c = ope.summarize_off_policy_estimates(\\n\",\n\" action_dist=action_dist_c,\\n\",\n\" estimated_rewards_by_reg_model=estimated_rewards_by_reg_model\\n\",\n\"metadata\": {},\n\"source\": [\n\"## (4) Evaluation of OPE estimators\\n\",\n- \"Our final step is the evaluation of OPE estimators, which evaluates and compares the estimatin accuracy of the OPE estimators.\\n\",\n+ \"Our final step is **the evaluation of OPE**, which evaluates and compares the estimation accuracies of OPE estimators.\\n\",\n\"\\n\",\n\"With the synthetic data, we can know the ground-truth policy values of the evaluation policies. 
\\n\",\n- \"Therefore, we can compare the estimated policy values by OPE estimators with the ground-turths to evaluate the accuracy of the OPE estimators.\"\n+ \"Therefore, we can compare the estimated policy values by OPE estimators with the ground-turths to evaluate OPE estimators.\"\n]\n},\n{\n\"source\": [\n\"In fact, IPWLearner with Random Forest reveals the best performance among the three evaluation policies.\\n\",\n\"\\n\",\n- \"Using the above ground-truths, we can now evaluate the estimation accuracy of the OPE estimators.\"\n+ \"Using the above ground-truths, we can now evaluate the estimation accuracy of the estimators.\"\n]\n},\n{\n],\n\"source\": [\n\"# evaluate the estimation performances of OPE estimators \\n\",\n- \"# by comparing the estimated policy value of IPWLearner with Logistic Regression and its ground-truth.\\n\",\n+ \"# by comparing the estimated policy values of IPWLearner with Logistic Regression and its ground-truth.\\n\",\n\"# `evaluate_performance_of_estimators` returns a dictionary containing the relative estimation error of OPE estimators \\n\",\n\"relative_estimation_errors_a = ope.summarize_estimators_comparison(\\n\",\n\" ground_truth_policy_value=ground_truth_a,\\n\",\n],\n\"source\": [\n\"# evaluate the estimation performance of OPE estimators \\n\",\n- \"# by comparing the estimated policy value of IPWLearner with Randon Forest and its ground-truth.\\n\",\n+ \"# by comparing the estimated policy values of IPWLearner with Random Forest and its ground-truth.\\n\",\n\"# `evaluate_performance_of_estimators` returns a dictionary containing the relative estimation error of given estimators \\n\",\n\"relative_estimation_errors_b = ope.summarize_estimators_comparison(\\n\",\n\" ground_truth_policy_value=ground_truth_b,\\n\",\n\")\\n\",\n\"\\n\",\n\"# Doubly Robust reveals the best estimation performance (lower is better)\\n\",\n- \"# when evaluating the performance of IPWLearner with Randon Forest\\n\",\n+ \"# when evaluating the performance of IPWLearner with Random Forest\\n\",\n\"relative_estimation_errors_b\"\n]\n},\n],\n\"source\": [\n\"# evaluate the estimation performance of OPE estimators \\n\",\n- \"# by comparing the estimated policy value of Uniform Random and its ground-truth.\\n\",\n+ \"# by comparing the estimated policy values of Uniform Random and its ground-truth.\\n\",\n\"# `evaluate_performance_of_estimators` returns a dictionary containing the relative estimation error of given estimators \\n\",\n\"relative_estimation_errors_c = ope.summarize_estimators_comparison(\\n\",\n\" ground_truth_policy_value=ground_truth_c,\\n\",\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix typos in quickstarts
641,002
05.11.2020 16:46:08
-32,400
867fd0338f7d9d454adad162c4f8278ba25cc567
add random_state as argument to fit_predict
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -213,6 +213,7 @@ class RegressionModel(BaseEstimator):\nposition: Optional[np.ndarray] = None,\naction_dist: Optional[np.ndarray] = None,\nn_folds: int = 1,\n+ random_state: int = 12345,\n) -> None:\n\"\"\"Fit the regression model on given logged bandit feedback data and then predict the mean reward function of the same data.\n@@ -286,7 +287,7 @@ class RegressionModel(BaseEstimator):\nestimated_rewards_by_reg_model = np.zeros(\n(context.shape[0], self.n_actions, self.len_list)\n)\n- skf = KFold(n_splits=n_folds, shuffle=True)\n+ skf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)\nskf.get_n_splits(context)\nfor train_idx, test_idx in skf.split(context, reward):\naction_dist_tr = (\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add random_state as argument to fit_predict
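Continuing the cross-fitting sketch above, the new argument simply makes the fold assignment reproducible:

```python
# After this commit, the KFold split used for cross-fitting can be seeded
# (continuing from the variables defined in the previous sketch):
estimated_rewards_by_reg_model = regression_model.fit_predict(
    context=context,
    action=action,
    reward=reward,
    pscore=pscore,
    position=position,
    n_folds=2,
    random_state=12345,  # passed through to KFold(shuffle=True, random_state=...)
)
```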
641,014
05.11.2020 22:08:50
-32,400
c88b399534680e674913d0f4312aee707fee154a
update READMEs about the data
[ { "change_type": "MODIFY", "old_path": "obd/README.md", "new_path": "obd/README.md", "diff": "# Open Bandit Dataset\n-This directory contains the small size (10,000 records for each pair of campaign and behavior policy) version of our data that can be used for running [examples](https://github.com/st-tech/zr-obp/tree/master/examples).\n+This directory contains the small size (10,000 records for each pair of campaign and behavior policy) version of our data that can be used for running our [examples](https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_obd).\nThe full size version of our data is available at [https://research.zozo.com/data.html](https://research.zozo.com/data.html).\nThis dataset is released along with the paper:\n-Yuta Saito, Shunsuke Aihara, Megumi Matsutani, Yusuke Narita.\n-**A Large-scale Open Dataset for Bandit Algorithms.** [https://arxiv.org/abs/2008.07146](https://arxiv.org/abs/2008.07146)\n+Yuta Saito, Shunsuke Aihara, Megumi Matsutani, Yusuke Narita. <br>\n+**Large-scale Open Dataset, Pipeline, and Benchmark for Bandit Algorithms** <br>\n+[https://arxiv.org/abs/2008.07146](https://arxiv.org/abs/2008.07146)\nWhen using this dataset, please cite the paper with following bibtex:\n```\n@article{saito2020large,\n- title={A Large-scale Open Dataset for Bandit Algorithms},\n+ title={Large-scale Open Dataset, Pipeline, and Benchmark for Bandit Algorithms},\nauthor={Saito, Yuta, Shunsuke Aihara, Megumi Matsutani, Yusuke Narita},\njournal={arXiv preprint arXiv:2008.07146},\nyear={2020}\n@@ -20,9 +21,9 @@ When using this dataset, please cite the paper with following bibtex:\n```\n## Data description\n-Open Bandit Dataset is constructed in an A/B test of two multi-armed bandit policies in a large-scale fashion e-commerce platform, [ZOZOTOWN](https://zozo.jp/).\n-It currently consists of a total of 26M rows, each one representing a user impression with some feature values, selected items as actions, true propensity scores, and click indicators as an outcome.\n-This is especially suitable for evaluating *off-policy evaluation* (OPE), which attempts to estimate the counterfactual performance of hypothetical algorithms using data generated by a different algorithm in use.\n+Open Bandit Dataset is constructed in an A/B test of two multi-armed bandit policies on a large-scale fashion e-commerce platform, [ZOZOTOWN](https://zozo.jp/).\n+It currently consists of a total of about 26M rows, each one representing a user impression with some feature values, selected items as actions, true propensity scores, and click indicators as an outcome.\n+This is especially suitable for evaluating *off-policy evaluation* (OPE), which attempts to estimate the counterfactual performance of hypothetical algorithms using data generated by a different algorithm.\n## Fields\n@@ -51,8 +52,10 @@ Here is a detailed description of the fields (they are comma-separated in the CS\n- item feature 0-3: item related feature values\n+Note that user and item features are now anonymized using a hash function.\n+\n## Contact\nFor any question, feel free to contact:\n-The authors of the paper: [email protected]\n-ZOZO Research: [email protected]\n+- The authors of the paper: [email protected]\n+- ZOZO Research: [email protected]\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update READMEs about the data
641,014
07.11.2020 04:00:02
-32,400
7f771352a7a2559067ee2ac0ad91729a95672d59
add new eval metric for ope
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -9,7 +9,7 @@ from pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\n-import pandas as pd\n+from pandas import DataFrame\nimport seaborn as sns\nfrom .estimators import BaseOffPolicyEstimator\n@@ -206,7 +206,7 @@ class OffPolicyEvaluation:\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n- ) -> Tuple[pd.DataFrame, pd.DataFrame]:\n+ ) -> Tuple[DataFrame, DataFrame]:\n\"\"\"Summarize estimated_policy_values and their confidence intervals in off-policy evaluation by given estimators.\nParameters\n@@ -229,21 +229,21 @@ class OffPolicyEvaluation:\nReturns\n----------\n- (policy_value_df, policy_value_interval_df): Tuple[pd.DataFrame, pd.DataFrame]\n+ (policy_value_df, policy_value_interval_df): Tuple[DataFrame, DataFrame]\nEstimated policy values and their confidence intervals by off-policy estimators.\n\"\"\"\nassert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\nassert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n- policy_value_df = pd.DataFrame(\n+ policy_value_df = DataFrame(\nself.estimate_policy_values(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n),\nindex=[\"estimated_policy_value\"],\n)\n- policy_value_interval_df = pd.DataFrame(\n+ policy_value_interval_df = DataFrame(\nself.estimate_intervals(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n@@ -260,10 +260,10 @@ class OffPolicyEvaluation:\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: Optional[np.ndarray] = None,\nalpha: float = 0.05,\n- relative: bool = False,\n+ is_relative: bool = False,\nn_bootstrap_samples: int = 100,\nfig_dir: Optional[Path] = None,\n- fig_name: Optional[str] = None,\n+ fig_name: str = \"estimated_policy_value.png\",\n) -> None:\n\"\"\"Visualize estimated policy values by given off-policy estimators.\n@@ -282,7 +282,7 @@ class OffPolicyEvaluation:\nn_bootstrap_samples: int, default: 100\nNumber of resampling performed in the bootstrap procedure.\n- relative: bool, default: False,\n+ is_relative: bool, default: False,\nIf True, the method visualizes the estimated policy values of evaluation policy\nrelative to the ground-truth policy value of behavior policy.\n@@ -290,9 +290,8 @@ class OffPolicyEvaluation:\nPath to store the bar figure.\nIf 'None' is given, the figure will not be saved.\n- fig_name: str, default: None\n+ fig_name: str, default: \"estimated_policy_value.png\"\nName of the bar figure.\n- If 'None' is given, 'estimated_policy_value.png' will be used.\n\"\"\"\nassert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n@@ -315,12 +314,12 @@ class OffPolicyEvaluation:\nestimated_round_rewards_dict[\nestimator_name\n] = estimator._estimate_round_rewards(**estimator_inputs)\n- estimated_round_rewards_df = pd.DataFrame(estimated_round_rewards_dict)\n+ estimated_round_rewards_df = DataFrame(estimated_round_rewards_dict)\nestimated_round_rewards_df.rename(\ncolumns={key: key.upper() for key in estimated_round_rewards_dict.keys()},\ninplace=True,\n)\n- if relative:\n+ if is_relative:\nestimated_round_rewards_df /= self.bandit_feedback[\"reward\"].mean()\nplt.style.use(\"ggplot\")\n@@ -339,9 +338,6 @@ class OffPolicyEvaluation:\nplt.xticks(fontsize=25 - 2 * len(self.ope_estimators))\nif fig_dir:\n- fig_name = (\n- fig_name if fig_name is not None else \"estimated_policy_value.png\"\n- 
)\nfig.savefig(str(fig_dir / fig_name))\ndef evaluate_performance_of_estimators(\n@@ -349,6 +345,7 @@ class OffPolicyEvaluation:\nground_truth_policy_value: float,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ metric: str = \"relative-ee\",\n) -> Dict[str, float]:\n\"\"\"Evaluate estimation accuracies of off-policy estimators.\n@@ -378,10 +375,14 @@ class OffPolicyEvaluation:\nEstimated expected rewards for the given logged bandit feedback at each item and position by regression model.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ metric: str, default=\"relative-ee\"\n+ Evaluation metric to evaluate and compare the estimation performance of off-policy estimators.\n+ Must be \"relative-ee\" or \"se\".\n+\nReturns\n----------\n- relative_estimation_error_dict: Dict[str, float]\n- Dictionary containing relative estimation error of off-policy estimators.\n+ eval_metric_ope_dict: Dict[str, float]\n+ Dictionary containing evaluation metric for evaluating the estimation performance of off-policy estimators.\n\"\"\"\nassert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n@@ -389,30 +390,38 @@ class OffPolicyEvaluation:\nassert isinstance(\nground_truth_policy_value, float\n), \"ground_truth_policy_value must be a float\"\n+ assert metric in [\n+ \"relative-ee\",\n+ \"se\",\n+ ], \"metric must be either 'relative-ee' or 'se'\"\nif estimated_rewards_by_reg_model is None:\nlogger.warning(\n\"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n)\n- relative_estimation_error_dict = dict()\n+ eval_metric_ope_dict = dict()\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_policy_value = estimator.estimate_policy_value(**estimator_inputs)\n- relative_estimation_error_dict[estimator_name] = np.abs(\n- (estimated_policy_value - ground_truth_policy_value)\n- / ground_truth_policy_value\n- )\n- return relative_estimation_error_dict\n+ if metric == \"relative-ee\":\n+ relative_ee_ = estimated_policy_value - ground_truth_policy_value\n+ relative_ee_ /= ground_truth_policy_value\n+ eval_metric_ope_dict[estimator_name] = np.abs(relative_ee_)\n+ elif metric == \"se\":\n+ se_ = (estimated_policy_value - ground_truth_policy_value) ** 2\n+ eval_metric_ope_dict[estimator_name] = se_\n+ return eval_metric_ope_dict\ndef summarize_estimators_comparison(\nself,\nground_truth_policy_value: float,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n- ) -> pd.DataFrame:\n+ metric: str = \"relative-ee\",\n+ ) -> DataFrame:\n\"\"\"Summarize performance comparison of off-policy estimators.\nParameters\n@@ -428,21 +437,30 @@ class OffPolicyEvaluation:\nEstimated expected rewards for the given logged bandit feedback at each item and position by regression model.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ metric: str, default=\"relative-ee\"\n+ Evaluation metric to evaluate and compare the estimation performance of off-policy estimators.\n+ Must be \"relative-ee\" or \"se\".\n+\nReturns\n----------\n- relative_estimation_error_df: pd.DataFrame\n- Estimated policy values and their confidence intervals by off-policy estimators.\n+ eval_metric_ope_df: DataFrame\n+ Evaluation metric for evaluating the estimation performance of 
off-policy estimators.\n\"\"\"\nassert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\nassert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n+ assert metric in [\n+ \"relative-ee\",\n+ \"se\",\n+ ], \"metric must be either 'relative-ee' or 'se'\"\n- relative_estimation_error_df = pd.DataFrame(\n+ eval_metric_ope_df = DataFrame(\nself.evaluate_performance_of_estimators(\nground_truth_policy_value=ground_truth_policy_value,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ metric=metric,\n),\nindex=[\"relative_estimation_error\"],\n)\n- return relative_estimation_error_df.T\n+ return eval_metric_ope_df.T\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add new eval metric for ope
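The commit above compares an estimated policy value against the ground truth using either the relative estimation error or the squared error. Below is a minimal standalone sketch of that arithmetic; the function name and the numeric values are hypothetical illustrations, not part of the library.

```python
import numpy as np

def eval_metric_ope(estimated_policy_value: float,
                    ground_truth_policy_value: float,
                    metric: str = "relative-ee") -> float:
    """Hypothetical helper mirroring the two metrics added in the diff above."""
    if metric == "relative-ee":
        relative_ee = estimated_policy_value - ground_truth_policy_value
        relative_ee /= ground_truth_policy_value
        return np.abs(relative_ee)
    elif metric == "se":
        return (estimated_policy_value - ground_truth_policy_value) ** 2
    raise ValueError("metric must be either 'relative-ee' or 'se'")

# made-up values: an estimate of 0.052 against a ground truth of 0.050
print(eval_metric_ope(0.052, 0.050, metric="relative-ee"))  # ~0.04 (4% relative error)
print(eval_metric_ope(0.052, 0.050, metric="se"))           # ~4e-06
```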
641,014
07.11.2020 05:13:24
-32,400
83f470be64c71a8e9253334f4e144201412fa82f
refactor estimators
[ { "change_type": "MODIFY", "old_path": "obp/ope/estimators.py", "new_path": "obp/ope/estimators.py", "diff": "@@ -91,10 +91,10 @@ class ReplayMethod(BaseOffPolicyEstimator):\naction_match = np.array(\naction_dist[np.arange(action.shape[0]), action, position] == 1\n)\n- round_rewards = np.zeros_like(action_match)\n+ estimated_rewards = np.zeros_like(action_match)\nif action_match.sum() > 0.0:\n- round_rewards = action_match * reward / action_match.mean()\n- return round_rewards\n+ estimated_rewards = action_match * reward / action_match.mean()\n+ return estimated_rewards\ndef estimate_policy_value(\nself,\n@@ -210,15 +210,8 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\n\"\"\"\n- min_iw: float = 0.0\nestimator_name: str = \"ipw\"\n- def __post_init__(self) -> None:\n- \"\"\"Initialize Class.\"\"\"\n- assert (\n- self.min_iw >= 0.0\n- ), f\"minimum propensity score must be larger than or equal to zero, but {self.min_iw} is given\"\n-\ndef _estimate_round_rewards(\nself,\nreward: np.ndarray,\n@@ -250,10 +243,8 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nRewards estimated by IPW for each round.\n\"\"\"\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), action, position] / pscore\n- )\n- return reward * np.maximum(importance_weight, self.min_iw)\n+ iw = action_dist[np.arange(action.shape[0]), action, position] / pscore\n+ return reward * iw\ndef estimate_policy_value(\nself,\n@@ -430,10 +421,8 @@ class SelfNormalizedInverseProbabilityWeighting(InverseProbabilityWeighting):\nRewards estimated by the SNIPW estimator for each round.\n\"\"\"\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), action, position] / pscore\n- )\n- return reward * importance_weight / importance_weight.mean()\n+ iw = action_dist[np.arange(action.shape[0]), action, position] / pscore\n+ return reward * iw / iw.mean()\n@dataclass\n@@ -504,15 +493,11 @@ class DirectMethod(BaseOffPolicyEstimator):\n\"\"\"\nn_rounds = position.shape[0]\n- estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ q_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n- action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\n- return np.average(\n- estimated_rewards_by_reg_model_at_position,\n- weights=action_dist_at_position,\n- axis=1,\n- )\n+ pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+ return np.average(q_hat_at_position, weights=pi_e_at_position, axis=1,)\ndef estimate_policy_value(\nself,\n@@ -639,13 +624,8 @@ class DoublyRobust(InverseProbabilityWeighting):\n\"\"\"\n- min_iw: float = 0.0\nestimator_name: str = \"dr\"\n- def __post_init__(self) -> None:\n- \"\"\"Initialize Class.\"\"\"\n- super().__post_init__()\n-\ndef _estimate_round_rewards(\nself,\nreward: np.ndarray,\n@@ -684,26 +664,20 @@ class DoublyRobust(InverseProbabilityWeighting):\nRewards estimated by the DR estimator for each round.\n\"\"\"\n- n_rounds = position.shape[0]\n- estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ n_rounds = action.shape[0]\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ q_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n- action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\n- round_rewards = np.average(\n- estimated_rewards_by_reg_model_at_position,\n- weights=action_dist_at_position,\n- axis=1,\n- )\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), 
action, position] / pscore\n- )\n- estimated_observed_rewards = estimated_rewards_by_reg_model[\n- np.arange(action.shape[0]), action, position\n+ q_hat_factual = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n]\n- round_rewards += np.maximum(importance_weight, self.min_iw) * (\n- reward - estimated_observed_rewards\n+ pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+ estimated_rewards = np.average(\n+ q_hat_at_position, weights=pi_e_at_position, axis=1,\n)\n- return round_rewards\n+ estimated_rewards += iw * (reward - q_hat_factual)\n+ return estimated_rewards\ndef estimate_policy_value(\nself,\n@@ -894,24 +868,20 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\nRewards estimated by the SNDR estimator for each round.\n\"\"\"\n- n_rounds = position.shape[0]\n- estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ n_rounds = action.shape[0]\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ q_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n- action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\n- round_rewards = np.average(\n- estimated_rewards_by_reg_model_at_position,\n- weights=action_dist_at_position,\n- axis=1,\n- )\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), action, position] / pscore\n+ pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+ estimated_rewards = np.average(\n+ q_hat_at_position, weights=pi_e_at_position, axis=1,\n)\n- estimated_observed_rewards = estimated_rewards_by_reg_model[\n- np.arange(action.shape[0]), action, position\n+ q_hat_factual = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n]\n- round_rewards += importance_weight * (reward - estimated_observed_rewards)\n- return round_rewards * importance_weight.mean()\n+ estimated_rewards += iw * (reward - q_hat_factual)\n+ return estimated_rewards / iw.mean()\n@dataclass\n@@ -1000,23 +970,18 @@ class SwitchInverseProbabilityWeighting(DoublyRobust):\nRewards estimated by the Switch-IPW estimator for each round.\n\"\"\"\n- n_rounds = position.shape[0]\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), action, position] / pscore\n- )\n- switch_indicator = np.array(importance_weight <= self.tau, dtype=int)\n- estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ n_rounds = action.shape[0]\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ switch_indicator = np.array(iw <= self.tau, dtype=int)\n+ q_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n- action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\n- round_rewards = np.average(\n- estimated_rewards_by_reg_model_at_position,\n- weights=action_dist_at_position,\n- axis=1,\n+ pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+ estimated_rewards = (1 - switch_indicator) * np.average(\n+ q_hat_at_position, weights=pi_e_at_position, axis=1,\n)\n- round_rewards *= 1 - switch_indicator\n- round_rewards += switch_indicator * importance_weight * reward\n- return round_rewards\n+ estimated_rewards += switch_indicator * iw * reward\n+ return estimated_rewards\n@dataclass\n@@ -1106,27 +1071,21 @@ class SwitchDoublyRobust(DoublyRobust):\nRewards estimated by the Switch-DR estimator for each round.\n\"\"\"\n- n_rounds = position.shape[0]\n- estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ n_rounds = 
action.shape[0]\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ switch_indicator = np.array(iw <= self.tau, dtype=int)\n+ q_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n- action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\n- round_rewards = np.average(\n- estimated_rewards_by_reg_model_at_position,\n- weights=action_dist_at_position,\n- axis=1,\n- )\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), action, position] / pscore\n- )\n- estimated_observed_rewards = estimated_rewards_by_reg_model[\n- np.arange(action.shape[0]), action, position\n+ q_hat_factual = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n]\n- switch_indicator = np.array(importance_weight <= self.tau, dtype=int)\n- round_rewards += (\n- switch_indicator * importance_weight * (reward - estimated_observed_rewards)\n+ pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+ estimated_rewards = np.average(\n+ q_hat_at_position, weights=pi_e_at_position, axis=1,\n)\n- return round_rewards\n+ estimated_rewards += switch_indicator * iw * (reward - q_hat_factual)\n+ return estimated_rewards\n@dataclass\n@@ -1228,24 +1187,18 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nRewards estimated by the DRos estimator for each round.\n\"\"\"\n- n_rounds = position.shape[0]\n- estimated_rewards_by_reg_model_at_position = estimated_rewards_by_reg_model[\n+ n_rounds = action.shape[0]\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ shrinkage_weight = (self.lambda_ * iw) / (iw ** 2 + self.lambda_)\n+ q_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n- action_dist_at_position = action_dist[np.arange(n_rounds), :, position]\n- round_rewards = np.average(\n- estimated_rewards_by_reg_model_at_position,\n- weights=action_dist_at_position,\n- axis=1,\n- )\n- importance_weight = (\n- action_dist[np.arange(action.shape[0]), action, position] / pscore\n- )\n- shrinkage_weight = (self.lambda_ * importance_weight) / (\n- importance_weight ** 2 + self.lambda_\n- )\n- estimated_observed_rewards = estimated_rewards_by_reg_model[\n- np.arange(action.shape[0]), action, position\n+ q_hat_factual = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n]\n- round_rewards += shrinkage_weight * (reward - estimated_observed_rewards)\n- return round_rewards\n+ pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+ estimated_rewards = np.average(\n+ q_hat_at_position, weights=pi_e_at_position, axis=1,\n+ )\n+ estimated_rewards += shrinkage_weight * (reward - q_hat_factual)\n+ return estimated_rewards\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
refactor estimators
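The refactor above rewrites the doubly robust estimator's per-round reward as the model-based value under the evaluation policy plus an importance-weighted correction on the factual action. The sketch below reproduces that computation on made-up arrays with a single recommendation slot; all inputs are synthetic stand-ins rather than library calls.

```python
import numpy as np

# toy logged data: 4 rounds, 3 actions, a single recommendation slot (position 0)
n_rounds, n_actions = 4, 3
rng = np.random.default_rng(0)
action = rng.integers(n_actions, size=n_rounds)                  # logged actions a_t
reward = rng.binomial(1, 0.5, size=n_rounds)                     # logged rewards r_t
pscore = np.full(n_rounds, 1.0 / n_actions)                      # pi_b(a_t|x_t), uniform here
position = np.zeros(n_rounds, dtype=int)
action_dist = rng.dirichlet(np.ones(n_actions), size=n_rounds)[:, :, np.newaxis]  # pi_e(a|x)
q_hat = rng.uniform(size=(n_rounds, n_actions, 1))               # regression model output

# per-round DR rewards, mirroring the refactored variable names (iw, q_hat_*)
iw = action_dist[np.arange(n_rounds), action, position] / pscore
q_hat_at_position = q_hat[np.arange(n_rounds), :, position]
q_hat_factual = q_hat[np.arange(n_rounds), action, position]
pi_e_at_position = action_dist[np.arange(n_rounds), :, position]
estimated_rewards = np.average(q_hat_at_position, weights=pi_e_at_position, axis=1)
estimated_rewards += iw * (reward - q_hat_factual)
print(estimated_rewards.mean())  # DR estimate of the evaluation policy's value
```

The baseline term keeps variance low while the importance-weighted residual removes the bias of the regression model, which is the usual motivation for DR over plain DM or IPW.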
641,014
07.11.2020 05:13:47
-32,400
6e9cdfd34d14960c64d084ca870fe68eb26449e7
add random_state for seaborn.barplot
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -48,7 +48,7 @@ class OffPolicyEvaluation:\n>>> bandit_feedback.keys()\ndict_keys(['n_rounds', 'n_actions', 'action', 'position', 'reward', 'pscore', 'context', 'action_context'])\n- # (2) Offline Bandit Simulation\n+ # (2) Off-Policy Learning\n>>> evaluation_policy = BernoulliTS(\nn_actions=dataset.n_actions,\nlen_list=dataset.len_list,\n@@ -262,6 +262,7 @@ class OffPolicyEvaluation:\nalpha: float = 0.05,\nis_relative: bool = False,\nn_bootstrap_samples: int = 100,\n+ random_state: Optional[int] = None,\nfig_dir: Optional[Path] = None,\nfig_name: str = \"estimated_policy_value.png\",\n) -> None:\n@@ -282,6 +283,9 @@ class OffPolicyEvaluation:\nn_bootstrap_samples: int, default: 100\nNumber of resampling performed in the bootstrap procedure.\n+ random_state: int, default: None\n+ Controls the random seed in bootstrap sampling.\n+\nis_relative: bool, default: False,\nIf True, the method visualizes the estimated policy values of evaluation policy\nrelative to the ground-truth policy value of behavior policy.\n@@ -329,6 +333,7 @@ class OffPolicyEvaluation:\nax=ax,\nci=100 * (1 - alpha),\nn_boot=n_bootstrap_samples,\n+ seed=random_state,\n)\nplt.xlabel(\"OPE Estimators\", fontsize=25)\nplt.ylabel(\n@@ -461,6 +466,6 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmetric=metric,\n),\n- index=[\"relative_estimation_error\"],\n+ index=[metric],\n)\nreturn eval_metric_ope_df.T\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add random_state for seaborn.barplot
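This commit threads random_state through to seaborn's bootstrap so that the plotted confidence intervals are reproducible. A hedged sketch of the underlying seaborn call is below; the DataFrame is fabricated, and newer seaborn releases deprecate ci= in favour of errorbar=, so the keyword set shown here follows the version the diff targets rather than the latest API.

```python
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# fabricated per-round estimates for two estimators
rng = np.random.default_rng(12345)
df = pd.DataFrame({
    "estimator": np.repeat(["ipw", "dr"], 200),
    "estimated_policy_value": np.concatenate(
        [rng.normal(0.05, 0.02, 200), rng.normal(0.05, 0.01, 200)]
    ),
})

fig, ax = plt.subplots()
# passing seed makes the bootstrapped error bars identical across reruns
sns.barplot(data=df, x="estimator", y="estimated_policy_value",
            ax=ax, ci=95, n_boot=100, seed=12345)
plt.show()
```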
641,014
07.11.2020 18:19:10
-32,400
9636dde8269d0b6090df4234fe9a5c26cb6b5e0f
add continuous reward option
[ { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -6,6 +6,7 @@ from dataclasses import dataclass\nfrom typing import Optional, Callable\nimport numpy as np\n+from scipy.stats import truncnorm\nfrom sklearn.utils import check_random_state\nfrom .base import BaseSyntheticBanditDataset\n@@ -33,8 +34,10 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\ndim_context: int, default: 1\nNumber of dimensions of context vectors.\n- dim_action_context: int, default: 1\n- Number of dimensions of vector representation for each action.\n+ reward_type: str, default: 'binary'\n+ Type of reward variable, must be either 'binary' or 'continuous'.\n+ When 'binary' is given, rewards are sampled from the Bernoulli distribution.\n+ When 'continuous' is given, rewards are sampled from the truncated Normal distribution with `scale=1`.\nreward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default: None\nFunction generating expected reward with context and action context vectors,\n@@ -45,8 +48,7 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nbehavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default: None\nFunction generating probability distribution over action space,\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n- If None is set, context **independent** probability of choosing each action will be\n- sampled from the dirichlet distribution automatically (context-free behavior policy).\n+ If None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\nrandom_state: int, default: None\nControls the random seed in sampling synthetic bandit dataset.\n@@ -109,7 +111,7 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nn_actions: int\ndim_context: int = 1\n- dim_action_context: int = 1\n+ reward_type: str = \"binary\"\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n@@ -125,35 +127,28 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nassert self.dim_context > 0 and isinstance(\nself.dim_context, int\n), f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- assert self.dim_action_context > 0 and isinstance(\n- self.dim_action_context, int\n- ), f\"dim_action_context must be a positive integer, but {self.dim_action_context} is given\"\n+ assert self.reward_type in [\n+ \"binary\",\n+ \"continuous\",\n+ ], f\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\nself.random_ = check_random_state(self.random_state)\nif self.reward_function is None:\nself.expected_reward = self.sample_contextfree_expected_reward()\nif self.behavior_policy_function is None:\n- self.behavior_policy = self.sample_contextfree_behavior_policy()\n- self.action_context = self.sample_action_context()\n+ self.behavior_policy = np.ones(self.n_actions) / self.n_actions\n+ # one-hot encoding representations characterizing each action\n+ self.action_context = np.eye(self.n_actions, dtype=int)\n@property\ndef len_list(self) -> int:\n\"\"\"Length of recommendation lists.\"\"\"\nreturn 1\n- def sample_action_context(self) -> np.ndarray:\n- \"\"\"Sample action context vectors from the standard normal distribution.\"\"\"\n- return self.random_.normal(size=(self.n_actions, self.dim_action_context))\n-\ndef sample_contextfree_expected_reward(self) -> 
np.ndarray:\n\"\"\"Sample expected reward for each action from the uniform distribution.\"\"\"\nreturn self.random_.uniform(size=self.n_actions)\n- def sample_contextfree_behavior_policy(self) -> np.ndarray:\n- \"\"\"Sample probability of choosing each action from the dirichlet distribution.\"\"\"\n- alpha = self.random_.uniform(size=self.n_actions)\n- return self.random_.dirichlet(alpha=alpha)\n-\ndef obtain_batch_bandit_feedback(self, n_rounds: int) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -168,14 +163,17 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nGenerated synthetic bandit feedback dataset.\n\"\"\"\n- context = self.random_.normal(size=(n_rounds, self.dim_context))\n+ assert n_rounds > 0 and isinstance(\n+ n_rounds, int\n+ ), f\"n_rounds must be a positive integer, but {n_rounds} is given\"\n+ context = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\nif self.behavior_policy_function is None:\n+ behavior_policy_ = np.tile(self.behavior_policy, (n_rounds, 1))\naction = self.random_.choice(\nnp.arange(self.n_actions), p=self.behavior_policy, size=n_rounds\n)\n- pscore = self.behavior_policy[action]\nelse:\nbehavior_policy_ = self.behavior_policy_function(\ncontext=context,\n@@ -194,16 +192,28 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\n# sample reward for each round based on the reward function\nif self.reward_function is None:\n- expected_reward_ = self.expected_reward\n- reward = self.random_.binomial(n=1, p=expected_reward_[action])\n+ expected_reward_ = np.tile(self.expected_reward, (n_rounds, 1))\nelse:\nexpected_reward_ = self.reward_function(\ncontext=context,\naction_context=self.action_context,\nrandom_state=self.random_state,\n)\n- reward = self.random_.binomial(\n- n=1, p=expected_reward_[np.arange(n_rounds), action]\n+ expected_reward_factual = expected_reward_[np.arange(n_rounds), action]\n+ if self.reward_type == \"binary\":\n+ reward = self.random_.binomial(n=1, p=expected_reward_factual)\n+ elif self.reward_type == \"continuous\":\n+ min_, max_ = 0, 1e10\n+ mean, std = expected_reward_factual, 1.0\n+ a, b = (min_ - mean) / std, (max_ - mean) / std\n+ reward = truncnorm.rvs(\n+ a=a, b=b, loc=mean, scale=std, random_state=self.random_state\n+ )\n+ # correct expected_reward_, as we use truncated normal distribution here\n+ mean = expected_reward_\n+ a, b = (min_ - mean) / std, (max_ - mean) / std\n+ expected_reward_ = truncnorm.stats(\n+ a=a, b=b, loc=mean, scale=std, moments=\"m\"\n)\nreturn dict(\nn_rounds=n_rounds,\n@@ -237,7 +247,7 @@ def logistic_reward_function(\nReturns\n---------\nexpected_reward: array-like, shape (n_rounds, n_actions)\n- Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q:=\\\\mathbb{E}[r|x,a]`.\n+ Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\nassert (\n@@ -258,6 +268,46 @@ def logistic_reward_function(\nreturn sigmoid(logits)\n+def linear_reward_function(\n+ context: np.ndarray, action_context: np.ndarray, random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Linear mean reward function for synthetic bandit datasets.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation for each action.\n+\n+ random_state: int, 
default: None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ assert (\n+ isinstance(context, np.ndarray) and context.ndim == 2\n+ ), \"context must be 2-dimensional ndarray\"\n+ assert (\n+ isinstance(action_context, np.ndarray) and action_context.ndim == 2\n+ ), \"action_context must be 2-dimensional ndarray\"\n+\n+ random_ = check_random_state(random_state)\n+ expected_reward = np.zeros((context.shape[0], action_context.shape[0]))\n+ # each arm has different coefficient vectors\n+ coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n+ action_coef_ = random_.uniform(size=action_context.shape[1])\n+ for d in np.arange(action_context.shape[0]):\n+ expected_reward[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n+\n+ return expected_reward\n+\n+\ndef linear_behavior_policy(\ncontext: np.ndarray, action_context: np.ndarray, random_state: Optional[int] = None,\n) -> np.ndarray:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add continuous reward option
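The continuous-reward branch above samples from a normal distribution truncated at zero and then corrects the stored expected rewards, because truncation shifts the mean upward. Below is a small sketch of that scipy.stats.truncnorm usage on made-up expected rewards.

```python
import numpy as np
from scipy.stats import truncnorm

# hypothetical expected rewards of the factual actions for 5 logged rounds
expected_reward_factual = np.array([0.2, 0.5, 0.8, 1.2, 0.1])
min_, max_ = 0, 1e10          # left-truncate at zero, effectively unbounded above
mean, std = expected_reward_factual, 1.0

# truncnorm is parameterized by the bounds expressed in standard-score units
a, b = (min_ - mean) / std, (max_ - mean) / std
reward = truncnorm.rvs(a=a, b=b, loc=mean, scale=std, random_state=12345)

# the true mean of the truncated distribution sits slightly above `mean`,
# which is why the diff corrects expected_reward_ via truncnorm.stats
true_mean = truncnorm.stats(a=a, b=b, loc=mean, scale=std, moments="m")
print(reward)
print(true_mean)
```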
641,014
08.11.2020 01:20:13
-32,400
16bfda887c41385d52886f3855599f3dc81f15bc
refactor dataset module
[ { "change_type": "MODIFY", "old_path": "obp/dataset/__init__.py", "new_path": "obp/dataset/__init__.py", "diff": "from .base import *\nfrom .real import *\nfrom .synthetic import *\n+from .multiclass import *\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -53,7 +53,7 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nrandom_state: int, default: None\nControls the random seed in sampling synthetic bandit dataset.\n- dataset_name: str, default: 'synthetic_contextual_bandit_dataset'\n+ dataset_name: str, default: 'synthetic_bandit_dataset'\nName of the dataset.\nExamples\n@@ -72,40 +72,51 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\n>>> dataset = SyntheticBanditDataset(\nn_actions=10,\ndim_context=5,\n- dim_action_context=5,\nreward_function=logistic_reward_function,\nbehavior_policy=linear_behavior_policy,\nrandom_state=12345\n)\n>>> bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=100000)\n- >>> print(bandit_feedback)\n- {'n_rounds': 100000,\n+ >>> bandit_feedback\n+ {\n+ 'n_rounds': 100000,\n'n_actions': 10,\n- 'context': array([[ 0.06987669, 0.24667411, -0.0118616 , 1.00481159, 1.32719461],\n- [-0.91926156, -1.54910644, 0.0221846 , 0.75836315, -0.66052433],\n- [ 0.86258008, -0.0100319 , 0.05000936, 0.67021559, 0.85296503],\n+ 'context': array([[-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057],\n+ [ 1.39340583, 0.09290788, 0.28174615, 0.76902257, 1.24643474],\n+ [ 1.00718936, -1.29622111, 0.27499163, 0.22891288, 1.35291684],\n...,\n- [ 0.09658876, 2.03636863, 0.40584106, -0.49167468, -0.44993244],\n- [-1.13892634, -1.71173775, -0.98117438, 1.84662775, -1.47738898],\n- [ 1.19581374, -2.24630358, 0.25097774, -0.12573204, -1.07518047]]),\n- 'action': array([0, 1, 5, ..., 9, 1, 1]),\n+ [ 1.36946256, 0.58727761, -0.69296769, -0.27519988, -2.10289159],\n+ [-0.27428715, 0.52635353, 1.02572168, -0.18486381, 0.72464834],\n+ [-1.25579833, -1.42455203, -0.26361242, 0.27928604, 1.21015571]]),\n+ 'action_context': array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n+ [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n+ [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n+ [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n+ [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n+ [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n+ [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n+ [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n+ [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]),\n+ 'action': array([7, 4, 0, ..., 7, 9, 6]),\n'position': array([0, 0, 0, ..., 0, 0, 0]),\n- 'reward': array([1, 0, 1, ..., 1, 1, 0]),\n- 'expected_reward': array([[0.79484127, 0.98710467, 0.91364645, ..., 0.80883287, 0.0262742 ,\n- 0.86335842],\n- [0.21316852, 0.63537277, 0.32594524, ..., 0.13998069, 0.00316771,\n- 0.55818704],\n- [0.84340111, 0.98274578, 0.92609427, ..., 0.74362081, 0.03999977,\n- 0.83685006],\n+ 'reward': array([0, 1, 1, ..., 0, 1, 0]),\n+ 'expected_reward': array([[0.80210203, 0.73828559, 0.83199558, ..., 0.81190503, 0.70617705,\n+ 0.68985306],\n+ [0.94119582, 0.93473317, 0.91345213, ..., 0.94140688, 0.93152449,\n+ 0.90132868],\n+ [0.87248862, 0.67974991, 0.66965669, ..., 0.79229752, 0.82712978,\n+ 0.74923536],\n...,\n- [0.66977957, 0.98321981, 0.96810184, ..., 0.47796594, 0.05266329,\n- 0.81784767],\n- [0.12054673, 0.473379 , 0.2343796 , ..., 0.15433855, 0.00100676,\n- 0.56626301],\n- [0.51637384, 0.58875776, 0.49215658, ..., 0.09978619, 0.01262061,\n- 0.46472179]]),\n- 'pscore': array([0.08443531, 0.42866938, 0.17304293, ..., 0.11438704, 0.42866938,\n- 0.42866938])}\n+ [0.64856003, 
0.38145901, 0.84476094, ..., 0.40962057, 0.77114661,\n+ 0.65752798],\n+ [0.73208527, 0.82012699, 0.78161352, ..., 0.72361416, 0.8652249 ,\n+ 0.82571751],\n+ [0.40348366, 0.24485417, 0.24037926, ..., 0.49613133, 0.30714854,\n+ 0.5527749 ]]),\n+ 'pscore': array([0.05423855, 0.10339675, 0.09756788, ..., 0.05423855, 0.07250876,\n+ 0.14065505])\n+ }\n\"\"\"\n@@ -117,7 +128,7 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n] = None\nrandom_state: Optional[int] = None\n- dataset_name: str = \"synthetic_contextual_bandit_dataset\"\n+ dataset_name: str = \"synthetic_bandit_dataset\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
refactor dataset module
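After this refactor the synthetic dataset characterizes actions by one-hot vectors and, when no behavior_policy_function is supplied, logs data under a uniform random policy. The snippet below is a standalone sketch of those two defaults with toy sizes; it does not call the class itself.

```python
import numpy as np

n_actions, n_rounds = 5, 4
rng = np.random.default_rng(12345)

# one-hot representations characterizing each action (replaces the old sampled vectors)
action_context = np.eye(n_actions, dtype=int)

# context-independent uniform random behavior policy
behavior_policy = np.ones(n_actions) / n_actions
behavior_policy_ = np.tile(behavior_policy, (n_rounds, 1))    # shape (n_rounds, n_actions)

action = rng.choice(np.arange(n_actions), p=behavior_policy, size=n_rounds)
pscore = behavior_policy_[np.arange(n_rounds), action]        # all equal to 1 / n_actions
print(action_context)
print(pscore)
```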
641,014
08.11.2020 01:44:48
-32,400
b5a3a66c8bb8321cb853e653d27aea3fa265ebc6
fix some docstring inconsistencies
[ { "change_type": "MODIFY", "old_path": "obp/dataset/multiclass.py", "new_path": "obp/dataset/multiclass.py", "diff": "@@ -24,16 +24,16 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nA machine learning classifier such as logistic regression is used to construct behavior and evaluation policies as follows.\n1. Split the original data into training (:math:`\\\\mathcal{D}_{\\\\mathrm{tr}}`) and evaluation (:math:`\\\\mathcal{D}_{\\\\mathrm{ev}}`) sets.\n- 2. Train classifiers on :math:`\\\\mathcal{D}_{\\\\mathrm{tr}}` and regard them as base deterministic policies :math:`\\\\pi_{\\\\mathrm{det},b}` and :math:`\\\\pi_{\\\\mathrm{det},e}`.\n- 3. Construct behavior (:math:`\\\\pi_{b}`) and evaluation (:math:`\\\\pi_{e}`) policies based on :math:`\\\\pi_{\\\\mathrm{det}}` as\n+ 2. Train classifiers on :math:`\\\\mathcal{D}_{\\\\mathrm{tr}}` and obtain base deterministic policies :math:`\\\\pi_{\\\\mathrm{det},b}` and :math:`\\\\pi_{\\\\mathrm{det},e}`.\n+ 3. Construct behavior (:math:`\\\\pi_{b}`) and evaluation (:math:`\\\\pi_{e}`) policies based on :math:`\\\\pi_{\\\\mathrm{det},b}` and :math:`\\\\pi_{\\\\mathrm{det},e}` as\n.. math::\n- \\\\pi_b (a | x) := \\\\alpha_b \\\\pi_{\\\\mathrm{det},b} (a|x) + (1.0 - \\\\alpha_b) \\\\pi_{u} (a|x)\n+ \\\\pi_b (a | x) := \\\\alpha_b \\\\cdot \\\\pi_{\\\\mathrm{det},b} (a|x) + (1.0 - \\\\alpha_b) \\\\cdot \\\\pi_{u} (a|x)\n.. math::\n- \\\\pi_e (a | x) := \\\\alpha_e \\\\pi_{\\\\mathrm{det},e} (a|x) + (1.0 - \\\\alpha_e) \\\\pi_{u} (a|x)\n+ \\\\pi_e (a | x) := \\\\alpha_e \\\\cdot \\\\pi_{\\\\mathrm{det},e} (a|x) + (1.0 - \\\\alpha_e) \\\\cdot \\\\pi_{u} (a|x)\nwhere :math:`\\\\pi_{u}` is a uniform random policy and :math:`\\\\alpha_b` and :math:`\\\\alpha_e` are set by the user.\n@@ -60,11 +60,11 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nbase_classifier_b: ClassifierMixin\nMachine learning classifier used to construct a behavior policy.\n- alpha_b: float, default: 0.9\n+ alpha_b: float, default=0.9\nRation of a uniform random policy when constructing a **behavior** policy.\nMust be in the [0, 1) interval to make the behavior policy a stochastic one.\n- dataset_name: str, default: None\n+ dataset_name: str, default=None\nName of the dataset.\nExamples\n@@ -187,7 +187,7 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nIf float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the evaluation split.\nIf int, represents the absolute number of test samples.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in train-evaluation split.\n\"\"\"\n@@ -213,12 +213,12 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nPlease call `self.split_train_eval()` before calling this method.\nParameters\n- ----------\n+ -----------\neval_size: float or int, default=0.25\nIf float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\nIf int, represents the absolute number of test samples.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\nReturns\n@@ -261,10 +261,12 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\n) -> np.ndarray:\n\"\"\"Obtain action choice probabilities by an evaluation policy.\n- base_classifier_e: ClassifierMixin, default: None\n+ Parameters\n+ -----------\n+ base_classifier_e: ClassifierMixin, default=None\nMachine learning classifier used to construct a 
behavior policy.\n- alpha_e: float, default: 1.0\n+ alpha_e: float, default=1.0\nRation of a uniform random policy when constructing an **evaluation** policy.\nMust be in the [0, 1] interval (evaluation policy can be deterministic).\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "@@ -32,10 +32,10 @@ class OpenBanditDataset(BaseRealBanditDataset):\ncampaign: str\nOne of the three possible campaigns considered in ZOZOTOWN, \"all\", \"men\", and \"women\".\n- data_path: Path, default: Path('./obd')\n+ data_path: Path, default=Path('./obd')\nPath that stores Open Bandit Dataset.\n- dataset_name: str, default: 'obd'\n+ dataset_name: str, default='obd'\nName of the dataset.\nReferences\n@@ -109,13 +109,13 @@ class OpenBanditDataset(BaseRealBanditDataset):\ncampaign: str\nOne of the three possible campaigns considered in ZOZOTOWN (i.e., \"all\", \"men\", and \"women\").\n- data_path: Path, default: Path('./obd')\n+ data_path: Path, default=Path('./obd')\nPath that stores Open Bandit Dataset.\ntest_size: float, default=0.3\nIf float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n- is_timeseries_split: bool, default: False\n+ is_timeseries_split: bool, default=False\nIf true, split the original logged badnit feedback data by time series.\nReturns\n@@ -178,7 +178,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\ntest_size: float, default=0.3\nIf float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n- is_timeseries_split: bool, default: False\n+ is_timeseries_split: bool, default=False\nIf true, split the original logged badnit feedback data by time series.\nReturns\n@@ -233,10 +233,10 @@ class OpenBanditDataset(BaseRealBanditDataset):\ntest_size: float, default=0.3\nIf float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n- is_timeseries_split: bool, default: False\n+ is_timeseries_split: bool, default=False\nIf true, split the original logged badnit feedback data by time series.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling logged bandit dataset.\nReturns\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -31,29 +31,29 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nn_actions: int\nNumber of actions.\n- dim_context: int, default: 1\n+ dim_context: int, default=1\nNumber of dimensions of context vectors.\n- reward_type: str, default: 'binary'\n+ reward_type: str, default='binary'\nType of reward variable, must be either 'binary' or 'continuous'.\nWhen 'binary' is given, rewards are sampled from the Bernoulli distribution.\nWhen 'continuous' is given, rewards are sampled from the truncated Normal distribution with `scale=1`.\n- reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default: None\n+ reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\nFunction generating expected reward with context and action context vectors,\ni.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\nIf None is set, context **independent** expected reward for each action will be\nsampled from the uniform distribution automatically.\n- behavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default: None\n+ behavior_policy_function: 
Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\nFunction generating probability distribution over action space,\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\nIf None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling synthetic bandit dataset.\n- dataset_name: str, default: 'synthetic_bandit_dataset'\n+ dataset_name: str, default='synthetic_bandit_dataset'\nName of the dataset.\nExamples\n@@ -252,7 +252,7 @@ def logistic_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation for each action.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling dataset.\nReturns\n@@ -292,7 +292,7 @@ def linear_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation for each action.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling dataset.\nReturns\n@@ -332,7 +332,7 @@ def linear_behavior_policy(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation for each action.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling dataset.\nReturns\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/estimators.py", "new_path": "obp/ope/estimators.py", "diff": "@@ -49,7 +49,7 @@ class ReplayMethod(BaseOffPolicyEstimator):\nParameters\n----------\n- estimator_name: str, default: 'rm'.\n+ estimator_name: str, default='rm'.\nName of off-policy estimator.\nReferences\n@@ -151,13 +151,13 @@ class ReplayMethod(BaseOffPolicyEstimator):\nposition: array-like, shape (n_rounds,)\nPositions of each round in the given logged bandit feedback.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 10000\n+ n_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -197,7 +197,7 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nParameters\n------------\n- estimator_name: str, default: 'ipw'.\n+ estimator_name: str, default='ipw'.\nName of off-policy estimator.\nReferences\n@@ -320,13 +320,13 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nDistribution over actions or the action choice probabilities\nby the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a|x)`.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 10000\n+ n_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -372,7 +372,7 @@ class SelfNormalizedInverseProbabilityWeighting(InverseProbabilityWeighting):\nParameters\n----------\n- estimator_name: str, default: 'snipw'.\n+ estimator_name: str, default='snipw'.\nName of off-policy estimator.\nReferences\n@@ -451,7 +451,7 @@ class DirectMethod(BaseOffPolicyEstimator):\nParameters\n----------\n- estimator_name: str, default: 'dm'.\n+ estimator_name: str, default='dm'.\nName of off-policy estimator.\nReferences\n@@ -554,13 
+554,13 @@ class DirectMethod(BaseOffPolicyEstimator):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nEstimated rewards for each round, action, and position by regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 10000\n+ n_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -611,7 +611,7 @@ class DoublyRobust(InverseProbabilityWeighting):\nParameters\n----------\n- estimator_name: str, default: 'dr'.\n+ estimator_name: str, default='dr'.\nName of off-policy estimator.\nReferences\n@@ -760,13 +760,13 @@ class DoublyRobust(InverseProbabilityWeighting):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nEstimated rewards for each round, action, and position by regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 10000\n+ n_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -815,7 +815,7 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\nParameters\n----------\n- estimator_name: str, default: 'sndr'.\n+ estimator_name: str, default='sndr'.\nName of off-policy estimator.\nReferences\n@@ -906,11 +906,11 @@ class SwitchInverseProbabilityWeighting(DoublyRobust):\nParameters\n----------\n- tau: float, default: 1\n+ tau: float, default=1\nSwitching hyperparameter. When importance weight is larger than this parameter, the DM estimator is applied, otherwise the IPW estimator is applied.\nThis hyperparameter should be larger than 1., otherwise it is meaningless.\n- estimator_name: str, default: 'switch-ipw'.\n+ estimator_name: str, default='switch-ipw'.\nName of off-policy estimator.\nReferences\n@@ -1007,11 +1007,11 @@ class SwitchDoublyRobust(DoublyRobust):\nParameters\n----------\n- tau: float, default: 1\n+ tau: float, default=1\nSwitching hyperparameter. When importance weight is larger than this parameter, the DM estimator is applied, otherwise the DR estimator is applied.\nThis hyperparameter should be larger than 0., otherwise it is meaningless.\n- estimator_name: str, default: 'switch-dr'.\n+ estimator_name: str, default='switch-dr'.\nName of off-policy estimator.\nReferences\n@@ -1127,7 +1127,7 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nlambda_: float\nShrinkage hyperparameter. 
This hyperparameter should be larger than 0., otherwise it is meaningless.\n- estimator_name: str, default: 'dr-os'.\n+ estimator_name: str, default='dr-os'.\nName of off-policy estimator.\nReferences\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -161,13 +161,13 @@ class OffPolicyEvaluation:\nEstimated expected rewards for the given logged bandit feedback at each item and position by regression model.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 100\n+ n_bootstrap_samples: int, default=100\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -218,13 +218,13 @@ class OffPolicyEvaluation:\nEstimated expected rewards for the given logged bandit feedback at each item and position by regression model.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 100\n+ n_bootstrap_samples: int, default=100\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -277,24 +277,24 @@ class OffPolicyEvaluation:\nEstimated expected rewards for the given logged bandit feedback at each item and position by regression model.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 100\n+ n_bootstrap_samples: int, default=100\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\n- is_relative: bool, default: False,\n+ is_relative: bool, default=False,\nIf True, the method visualizes the estimated policy values of evaluation policy\nrelative to the ground-truth policy value of behavior policy.\n- fig_dir: Path, default: None\n+ fig_dir: Path, default=None\nPath to store the bar figure.\nIf 'None' is given, the figure will not be saved.\n- fig_name: str, default: \"estimated_policy_value.png\"\n+ fig_name: str, default=\"estimated_policy_value.png\"\nName of the bar figure.\n\"\"\"\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -28,7 +28,7 @@ class RegressionModel(BaseEstimator):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n@@ -101,7 +101,7 @@ class RegressionModel(BaseEstimator):\nreward: array-like, shape (n_rounds,)\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\n- pscore: Optional[np.ndarray], default: None\n+ pscore: Optional[np.ndarray], default=None\nPropensity scores, the action choice probabilities by behavior policy,\nin the training logged bandit feedback.\n" }, { "change_type": "MODIFY", "old_path": "obp/policy/base.py", "new_path": "obp/policy/base.py", "diff": "@@ -22,14 +22,14 @@ class BaseContextFreePolicy(metaclass=ABCMeta):\nn_actions: int\nNumber of actions.\n- 
len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n\"\"\"\n@@ -95,20 +95,20 @@ class BaseContextualPolicy(metaclass=ABCMeta):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default: 1.\n+ alpha_: float, default=1.\nPrior parameter for the online logistic regression.\n- lambda_: float, default: 1.\n+ lambda_: float, default=1.\nRegularization hyperparameter for the online logistic regression.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n\"\"\"\n@@ -180,7 +180,7 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n@@ -234,7 +234,7 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nreward: array-like, shape (n_actions,)\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\n- pscore: array-like, shape (n_actions,), default: None\n+ pscore: array-like, shape (n_actions,), default=None\nPropensity scores or the action choice probabilities by behavior policy, i.e., :math:`\\\\pi_b(a_t|x_t)`.\nReturns\n@@ -266,7 +266,7 @@ class BaseOffPolicyLearner(metaclass=ABCMeta):\nreward: array-like, shape (n_rounds,)\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\n- pscore: array-like, shape (n_rounds,), default: None\n+ pscore: array-like, shape (n_rounds,), default=None\nPropensity scores or the action choice probabilities by behavior policy, i.e., :math:`\\\\pi_b(a_t|x_t)`.\nposition: array-like, shape (n_rounds,), default=None\n" }, { "change_type": "MODIFY", "old_path": "obp/policy/contextfree.py", "new_path": "obp/policy/contextfree.py", "diff": "@@ -30,20 +30,20 @@ class EpsilonGreedy(BaseContextFreePolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default: 1.\n+ epsilon: float, default=1.\nExploration hyperparameter that must take value in the range of [0., 1.].\n- policy_name: str, default: f'egreedy_{epsilon}'.\n+ policy_name: str, default=f'egreedy_{epsilon}'.\nName of bandit policy.\n\"\"\"\n@@ -105,20 +105,20 @@ class Random(EpsilonGreedy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- 
random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default: 1.\n+ epsilon: float, default=1.\nExploration hyperparameter that must take value in the range of [0., 1.].\n- policy_name: str, default: 'random'.\n+ policy_name: str, default='random'.\nName of bandit policy.\n\"\"\"\n@@ -132,7 +132,7 @@ class Random(EpsilonGreedy):\nParameters\n----------\n- n_rounds: int, default: 1\n+ n_rounds: int, default=1\nNumber of rounds in the distribution over actions.\n(the size of the first axis of `action_dist`)\n@@ -157,30 +157,30 @@ class BernoulliTS(BaseContextFreePolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- alpha: array-like, shape (n_actions, ), default: None\n+ alpha: array-like, shape (n_actions, ), default=None\nPrior parameter vector for Beta distributions.\n- beta: array-like, shape (n_actions, ), default: None\n+ beta: array-like, shape (n_actions, ), default=None\nPrior parameter vector for Beta distributions.\n- is_zozotown_prior: bool, default: False\n+ is_zozotown_prior: bool, default=False\nWhether to use hyperparameters for the beta distribution used\nat the start of the data collection period in ZOZOTOWN.\n- campaign: str, default: None\n+ campaign: str, default=None\nOne of the three possible campaigns considered in ZOZOTOWN, \"all\", \"men\", and \"women\".\n- policy_name: str, default: 'bts'\n+ policy_name: str, default='bts'\nName of bandit policy.\n\"\"\"\n@@ -245,11 +245,11 @@ class BernoulliTS(BaseContextFreePolicy):\nParameters\n----------\n- n_rounds: int, default: 1\n+ n_rounds: int, default=1\nNumber of rounds in the distribution over actions.\n(the size of the first axis of `action_dist`)\n- n_sim: int, default: 100000\n+ n_sim: int, default=100000\nNumber of simulations in the Monte Carlo simulation to compute the distribution over actions.\nReturns\n" }, { "change_type": "MODIFY", "old_path": "obp/policy/linear.py", "new_path": "obp/policy/linear.py", "diff": "@@ -21,20 +21,20 @@ class LinEpsilonGreedy(BaseContextualPolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- n_trial: int, default: 0\n+ n_trial: int, default=0\nCurrent number of trials in a bandit simulation.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default: 0.\n+ epsilon: float, default=0.\nExploration hyperparameter that must take value in the range of [0., 1.].\nReferences\n@@ -140,17 +140,17 @@ class LinUCB(BaseContextualPolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- 
random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default: 0.\n+ epsilon: float, default=0.\nExploration hyperparameter that must take value in the range of [0., 1.].\nReferences\n@@ -257,17 +257,17 @@ class LinTS(BaseContextualPolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default: 1.\n+ alpha_: float, default=1.\nPrior parameter for the online logistic regression.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n\"\"\"\n" }, { "change_type": "MODIFY", "old_path": "obp/policy/logistic.py", "new_path": "obp/policy/logistic.py", "diff": "@@ -25,23 +25,23 @@ class LogisticEpsilonGreedy(BaseContextualPolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default: 1.\n+ alpha_: float, default=1.\nPrior parameter for the online logistic regression.\n- lambda_: float, default: 1.\n+ lambda_: float, default=1.\nRegularization hyperparameter for the online logistic regression.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default: 0.\n+ epsilon: float, default=0.\nExploration hyperparameter that must take value in the range of [0., 1.].\n\"\"\"\n@@ -131,23 +131,23 @@ class LogisticUCB(BaseContextualPolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default: 1.\n+ alpha_: float, default=1.\nPrior parameter for the online logistic regression.\n- lambda_: float, default: 1.\n+ lambda_: float, default=1.\nRegularization hyperparameter for the online logistic regression.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default: 0.\n+ epsilon: float, default=0.\nExploration hyperparameter that must take value in the range of [0., 1.].\nReferences\n@@ -244,20 +244,20 @@ class LogisticTS(BaseContextualPolicy):\nn_actions: int\nNumber of actions.\n- len_list: int, default: 1\n+ len_list: int, default=1\nLength of a list of recommended actions in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- batch_size: int, default: 1\n+ batch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default: 1.\n+ alpha_: float, default=1.\nPrior parameter for the online logistic regression.\n- lambda_: float, default: 1.\n+ lambda_: float, default=1.\nRegularization hyperparameter for the online logistic regression.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in sampling actions.\nReferences\n" }, { 
"change_type": "MODIFY", "old_path": "obp/policy/offline.py", "new_path": "obp/policy/offline.py", "diff": "@@ -51,7 +51,7 @@ class IPWLearner(BaseOffPolicyLearner):\nreward: array-like, shape (n_rounds,)\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\n- pscore: array-like, shape (n_rounds,), default: None\n+ pscore: array-like, shape (n_rounds,), default=None\nPropensity scores, the probability of selecting each action by behavior policy,\nin the given logged bandit feedback.\n" }, { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -24,13 +24,13 @@ def estimate_confidence_interval_by_bootstrap(\nsamples: array-like\nEmpirical observed samples to be used to estimate cumulative distribution function.\n- alpha: float, default: 0.05\n+ alpha: float, default=0.05\nP-value.\n- n_bootstrap_samples: int, default: 10000\n+ n_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n- random_state: int, default: None\n+ random_state: int, default=None\nControls the random seed in bootstrap sampling.\nReturns\n@@ -179,10 +179,10 @@ def check_bandit_feedback_inputs(\nreward: array-like, shape (n_rounds,)\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\n- position: array-like, shape (n_rounds,), default: None\n+ position: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\n- pscore: array-like, shape (n_rounds,), default: None\n+ pscore: array-like, shape (n_rounds,), default=None\nPropensity scores, the probability of selecting each action by behavior policy,\nin the given logged bandit feedback.\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix some docstring inconsistencies
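Although this commit only touches docstrings, the multiclass.py docstring it edits describes how deterministic classifier policies are mixed with a uniform policy to obtain stochastic behavior and evaluation policies, pi(a|x) = alpha * pi_det(a|x) + (1 - alpha) * pi_u(a|x). A toy illustration of that mixing, with a fabricated classifier output, follows.

```python
import numpy as np

n_rounds, n_actions = 4, 3
alpha_b = 0.9
rng = np.random.default_rng(0)

# hypothetical deterministic base policy: one-hot predictions of a trained classifier
predicted_labels = rng.integers(n_actions, size=n_rounds)
pi_det = np.zeros((n_rounds, n_actions))
pi_det[np.arange(n_rounds), predicted_labels] = 1.0

# mix with the uniform policy so that the resulting behavior policy is stochastic
pi_u = np.ones((n_rounds, n_actions)) / n_actions
pi_b = alpha_b * pi_det + (1.0 - alpha_b) * pi_u
print(pi_b)              # every action gets probability at least (1 - alpha_b) / n_actions
print(pi_b.sum(axis=1))  # rows sum to 1
```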
641,014
09.11.2020 23:01:20
-32,400
ddcda69155254064d1ebf3895b0ef509658600ec
fix ipw learner
[ { "change_type": "MODIFY", "old_path": "obp/policy/offline.py", "new_path": "obp/policy/offline.py", "diff": "\"\"\"Offline Bandit Algorithms.\"\"\"\nfrom dataclasses import dataclass\n-from typing import Tuple\n+from typing import Tuple, Optional\nimport numpy as np\n+from sklearn.base import clone, ClassifierMixin, is_classifier\n+from sklearn.linear_model import LogisticRegression\nfrom .base import BaseOffPolicyLearner\n+from ..utils import check_bandit_feedback_inputs\n@dataclass\n@@ -16,8 +19,15 @@ class IPWLearner(BaseOffPolicyLearner):\nParameters\n-----------\n- base_model: ClassifierMixin\n- Machine learning classifier to be used to create the decision making policy.\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default=1\n+ Length of a list of recommended actions in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ base_classifier: ClassifierMixin\n+ Machine learning classifier used to train an offline decision making policy.\nReferences\n------------\n@@ -26,9 +36,20 @@ class IPWLearner(BaseOffPolicyLearner):\n\"\"\"\n+ base_classifier: Optional[ClassifierMixin] = None\n+\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\nsuper().__post_init__()\n+ if self.base_classifier is None:\n+ self.base_classifier = LogisticRegression(random_state=12345)\n+ else:\n+ assert is_classifier(\n+ self.base_classifier\n+ ), \"base_classifier must be a classifier\"\n+ self.base_classifier_list = [\n+ clone(self.base_classifier) for _ in np.arange(self.len_list)\n+ ]\ndef _create_train_data_for_opl(\nself,\n@@ -62,3 +83,101 @@ class IPWLearner(BaseOffPolicyLearner):\n\"\"\"\nreturn context, (reward / pscore), action\n+\n+ def fit(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n+ ) -> None:\n+ \"\"\"Fits the offline bandit policy according to the given logged bandit feedback data.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors in each round, i.e., :math:`x_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Sampled (realized) actions by behavior policy in each round, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Propensity scores or the action choice probabilities by behavior policy, i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+ If None is given, a learner assumes that there is only one position.\n+ When `len_list` > 1, position has to be set.\n+\n+ \"\"\"\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ )\n+ if pscore is None:\n+ n_actions = np.int(action.max() + 1)\n+ pscore = np.ones_like(action) / n_actions\n+ if self.len_list == 1:\n+ position = np.zeros_like(action, dtype=int)\n+ else:\n+ assert (\n+ position is not None\n+ ), \"position has to be set when len_list is larger than 1\"\n+\n+ for position_ in np.arange(self.len_list):\n+ X, sample_weight, y = self._create_train_data_for_opl(\n+ context=context[position == position_],\n+ action=action[position == position_],\n+ reward=reward[position == position_],\n+ pscore=pscore[position == position_],\n+ )\n+ self.base_classifier_list[position_].fit(\n+ X=X, y=y, 
sample_weight=sample_weight\n+ )\n+\n+ def predict(self, context: np.ndarray, epsilon: float = 0.0) -> np.ndarray:\n+ \"\"\"Predict best action for new data.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\n+\n+ epsilon: float, default=0.0\n+ Exploration hyperparameter that must take value in the interval [0.0, 1.0].\n+ A positive value of epsilon makes the policy stochastic, making sure that the\n+ overlap condition is satisfied in the resulting logged bandit feedback.\n+\n+ Returns\n+ -----------\n+ action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Action choice probabilities by a trained classifier.\n+\n+ \"\"\"\n+ assert (\n+ isinstance(context, np.ndarray) and context.ndim == 2\n+ ), \"context must be 2-dimensional ndarray\"\n+ assert (0.0 <= epsilon <= 1.0) and isinstance(\n+ epsilon, float\n+ ), f\"epsilon must be a float in the interval [0.0, 1.0], but {epsilon} is given\"\n+\n+ n_rounds = context.shape[0]\n+ action_dist = np.ones((n_rounds, self.n_actions, self.len_list))\n+ action_dist *= epsilon * (1.0 / self.n_actions)\n+ for position_ in np.arange(self.len_list):\n+ predicted_actions_at_position = self.base_classifier_list[\n+ position_\n+ ].predict(context)\n+ action_dist[\n+ np.arange(n_rounds),\n+ predicted_actions_at_position,\n+ np.ones(n_rounds, dtype=int) * position_,\n+ ] += (1 - epsilon)\n+ return action_dist\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix ipw learner
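The diff in this record gives IPWLearner a `fit` method driven by importance-weighted classification and an epsilon-parameterized `predict`. Below is a minimal usage sketch based only on the signatures shown here; the toy arrays, the LogisticRegression choice, and the `from obp.policy import IPWLearner` import path are assumptions for illustration, not part of the recorded commit.

import numpy as np
from sklearn.linear_model import LogisticRegression
from obp.policy import IPWLearner  # assumed import path

# fabricated logged bandit feedback, for illustration only
n_rounds, n_actions, dim_context = 1000, 5, 4
rng = np.random.RandomState(12345)
context = rng.normal(size=(n_rounds, dim_context))
action = rng.randint(n_actions, size=n_rounds)
reward = rng.binomial(n=1, p=0.3, size=n_rounds)
pscore = np.full(n_rounds, 1.0 / n_actions)  # uniform behavior policy

learner = IPWLearner(
    n_actions=n_actions,
    base_classifier=LogisticRegression(random_state=12345),
)
learner.fit(context=context, action=action, reward=reward, pscore=pscore)

# a positive epsilon keeps the learned policy stochastic (overlap condition)
action_dist = learner.predict(context=context, epsilon=0.1)
print(action_dist.shape)  # (1000, 5, 1)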
641,014
10.11.2020 06:15:39
-32,400
45856d5c87fe55d43ef7fdc52c7b94b5295f0981
fix potential error in the fit_predict method of RegressionModel
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -14,16 +14,16 @@ from ..utils import check_bandit_feedback_inputs\n@dataclass\nclass RegressionModel(BaseEstimator):\n- \"\"\"Machine learning model to estimate the mean reward function (:math:`q(x,a):= \\\\mathbb{E}_{r \\sim p(r|x,a)} [r|x,a]`).\n+ \"\"\"Machine learning model to estimate the mean reward function (:math:`q(x,a):= \\\\mathbb{E}[r|x,a]`).\nNote\n-------\n- Reward (or outcome) :math:`Y` must be either binary or continuous.\n+ Reward (or outcome) :math:`r` must be either binary or continuous.\nParameters\n------------\nbase_model: BaseEstimator\n- Model class to be used to estimate the mean reward function.\n+ A machine learning model used to estimate the mean reward function.\nn_actions: int\nNumber of actions.\n@@ -66,7 +66,7 @@ class RegressionModel(BaseEstimator):\n\"normal\",\n\"iw\",\n\"mrdr\",\n- ], f\"fitting method must be one of 'normal', 'iw', or 'mrdr', but {self.fitting_method} is given\"\n+ ], f\"fitting_method must be one of 'normal', 'iw', or 'mrdr', but {self.fitting_method} is given\"\nassert self.n_actions > 1 and isinstance(\nself.n_actions, int\n), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n@@ -101,9 +101,10 @@ class RegressionModel(BaseEstimator):\nreward: array-like, shape (n_rounds,)\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\n- pscore: Optional[np.ndarray], default=None\n- Propensity scores, the action choice probabilities by behavior policy,\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Action choice probabilities (propensity score) of a behavior policy\nin the training logged bandit feedback.\n+ When None is given, the the behavior policy is assumed to be a uniform one.\nposition: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\n@@ -123,20 +124,26 @@ class RegressionModel(BaseEstimator):\nposition=position,\naction_context=self.action_context,\n)\n+ n_rounds = context.shape[0]\n+\nif self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\nassert (\n- position is not None\n- ), \"position has to be set when len_list is larger than 1\"\n+ isinstance(position, np.ndarray) and position.ndim == 1\n+ ), f\"when len_list > 1, position must be a 1-dimensional ndarray\"\nif self.fitting_method in [\"iw\", \"mrdr\"]:\nassert (\n- action_dist is not None\n- ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then action_dist must be given\"\n- assert (\n- pscore is not None\n- ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then pscore must be given\"\n- n_data = context.shape[0]\n+ isinstance(action_dist, np.ndarray) and action_dist.ndim == 3\n+ ), f\"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n+ assert action_dist.shape == (\n+ n_rounds,\n+ self.n_actions,\n+ self.len_list,\n+ ), f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n+ if pscore is None:\n+ pscore = np.ones_like(action) / self.n_actions\n+\nfor position_ in np.arange(self.len_list):\nidx = position == position_\nX = self._pre_process_for_reg_model(\n@@ -149,7 +156,9 @@ class RegressionModel(BaseEstimator):\nself.base_model_list[position_].fit(X, reward[idx])\nelse:\naction_dist_at_position = action_dist[\n- np.arange(n_data), action, position_ * np.ones(n_data, dtype=int)\n+ 
np.arange(n_rounds),\n+ action,\n+ position_ * np.ones(n_rounds, dtype=int),\n][idx]\nif self.fitting_method == \"iw\":\nsample_weight = action_dist_at_position / pscore[idx]\n@@ -157,11 +166,9 @@ class RegressionModel(BaseEstimator):\nX, reward[idx], sample_weight=sample_weight\n)\nelif self.fitting_method == \"mrdr\":\n- sample_weight = (\n- action_dist_at_position\n- * (1.0 - pscore[idx])\n- / (pscore[idx] ** 2)\n- )\n+ sample_weight = action_dist_at_position\n+ sample_weight *= 1.0 - pscore[idx]\n+ sample_weight /= pscore[idx] ** 2\nself.base_model_list[position_].fit(\nX, reward[idx], sample_weight=sample_weight\n)\n@@ -215,7 +222,7 @@ class RegressionModel(BaseEstimator):\nn_folds: int = 1,\nrandom_state: Optional[int] = None,\n) -> None:\n- \"\"\"Fit the regression model on given logged bandit feedback data and then predict the mean reward function of the same data.\n+ \"\"\"Fit the regression model on given logged bandit feedback data and predict the reward function of the same data.\nNote\n------\n@@ -234,8 +241,9 @@ class RegressionModel(BaseEstimator):\nObserved rewards (or outcome) in each round, i.e., :math:`r_t`.\npscore: array-like, shape (n_rounds,), default=None\n- Propensity scores, the action choice probabilities by behavior policy,\n+ Action choice probabilities (propensity score) of a behavior policy\nin the training logged bandit feedback.\n+ When None is given, the the behavior policy is assumed to be a uniform one.\nposition: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\n@@ -248,7 +256,7 @@ class RegressionModel(BaseEstimator):\nn_folds: int, default=1\nNumber of folds in the cross-fitting procedure.\n- When 1 is given, then the regression model is trained on the whole logged bandit feedback data.\n+ When 1 is given, the regression model is trained on the whole logged bandit feedback data.\nrandom_state: int, default=None\n`random_state` affects the ordering of the indices, which controls the randomness of each fold.\n@@ -260,6 +268,16 @@ class RegressionModel(BaseEstimator):\nEstimated expected rewards for new data by the regression model.\n\"\"\"\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_context=self.action_context,\n+ )\n+ n_rounds = context.shape[0]\n+\nassert n_folds > 0 and isinstance(\nn_folds, int\n), f\"n_folds must be a positive integer, but {n_folds} is given\"\n@@ -267,15 +285,19 @@ class RegressionModel(BaseEstimator):\nposition = np.zeros_like(action)\nelse:\nassert (\n- position is not None\n- ), \"position has to be set when len_list is larger than 1\"\n+ isinstance(position, np.ndarray) and position.ndim == 1\n+ ), f\"when len_list > 1, position must be a 1-dimensional ndarray\"\nif self.fitting_method in [\"iw\", \"mrdr\"]:\nassert (\n- action_dist is not None\n- ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then action_dist must be given\"\n- assert (\n- pscore is not None\n- ), \"When either 'iw' or 'mrdr' is used as the 'fitting_method' argument, then pscore must be given\"\n+ isinstance(action_dist, np.ndarray) and action_dist.ndim == 3\n+ ), f\"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n+ assert action_dist.shape == (\n+ n_rounds,\n+ self.n_actions,\n+ self.len_list,\n+ ), f\"shape of action_dist must be (n_rounds, n_actions, len_list)={n_rounds, self.n_actions, self.len_list}, but is {action_dist.shape}\"\n+ if 
pscore is None:\n+ pscore = np.ones_like(action) / self.n_actions\nif n_folds == 1:\nself.fit(\n@@ -289,11 +311,11 @@ class RegressionModel(BaseEstimator):\nreturn self.predict(context=context)\nelse:\nestimated_rewards_by_reg_model = np.zeros(\n- (context.shape[0], self.n_actions, self.len_list)\n+ (n_rounds, self.n_actions, self.len_list)\n)\n- skf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)\n- skf.get_n_splits(context)\n- for train_idx, test_idx in skf.split(context):\n+ kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)\n+ kf.get_n_splits(context)\n+ for train_idx, test_idx in kf.split(context):\naction_dist_tr = (\naction_dist[train_idx] if action_dist is not None else action_dist\n)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix potential error in the fit_predict method of RegressionModel
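The record above adds stricter input checks and a KFold-based cross-fitting path to RegressionModel.fit_predict. The sketch below shows how the cross-fitting branch (n_folds > 1) might be invoked, based on the signature in the diff; the toy data, the RandomForestRegressor base model, the default one-hot action_context, and the `from obp.ope import RegressionModel` import are illustrative assumptions.

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from obp.ope import RegressionModel  # assumed import path

n_rounds, n_actions, dim_context = 1000, 5, 4
rng = np.random.RandomState(12345)
context = rng.normal(size=(n_rounds, dim_context))
action = rng.randint(n_actions, size=n_rounds)
reward = rng.binomial(n=1, p=0.3, size=n_rounds)
pscore = np.full(n_rounds, 1.0 / n_actions)

reg_model = RegressionModel(
    n_actions=n_actions,
    base_model=RandomForestRegressor(n_estimators=10, random_state=12345),
    fitting_method="normal",  # "iw"/"mrdr" would additionally require action_dist
)
# n_folds > 1 triggers the KFold-based cross-fitting added in this commit
estimated_rewards_by_reg_model = reg_model.fit_predict(
    context=context, action=action, reward=reward, pscore=pscore,
    n_folds=2, random_state=12345,
)
print(estimated_rewards_by_reg_model.shape)  # (1000, 5, 1)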
641,014
10.11.2020 16:42:54
-32,400
9d5ab1f3f3632fcfe0db4ba73840b4001166eb4f
add predict_proba method along with some refactoring
[ { "change_type": "MODIFY", "old_path": "obp/policy/offline.py", "new_path": "obp/policy/offline.py", "diff": "@@ -10,13 +10,14 @@ from scipy.special import softmax\nfrom sklearn.base import clone, ClassifierMixin, is_classifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.utils import check_random_state, check_scalar\n+from tqdm import tqdm\n-from .base import BaseOffPolicyLearner\n+from .base import BaseOfflinePolicyLearner\nfrom ..utils import check_bandit_feedback_inputs\n@dataclass\n-class IPWLearner(BaseOffPolicyLearner):\n+class IPWLearner(BaseOfflinePolicyLearner):\n\"\"\"Off-policy learner with Inverse Probability Weighting.\nParameters\n@@ -97,7 +98,7 @@ class IPWLearner(BaseOffPolicyLearner):\npscore: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\n) -> None:\n- \"\"\"Fits the offline bandit policy according to the given logged bandit feedback data.\n+ \"\"\"Fits an offline bandit policy using the given logged bandit feedback data.\nNote\n--------\n@@ -108,7 +109,7 @@ class IPWLearner(BaseOffPolicyLearner):\n\\\\hat{\\\\pi}\n& \\\\in \\\\arg \\\\max_{\\\\pi \\\\in \\\\Pi} \\\\hat{V}_{\\\\mathrm{IPW}} (\\\\pi ; \\\\mathcal{D}) \\\\\\\\\n- & = \\\\arg \\\\max_{\\\\pi \\\\in \\\\Pi} \\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[\\\\frac{\\\\mathbb{I} \\\\{\\\\pi (x_{i})=a_{i} \\\\}}{\\\\pi_{b}(a_{i} \\mid x_{i})} r_{i} \\\\right] \\\\\\\\\n+ & = \\\\arg \\\\max_{\\\\pi \\\\in \\\\Pi} \\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[\\\\frac{\\\\mathbb{I} \\\\{\\\\pi (x_{i})=a_{i} \\\\}}{\\\\pi_{b}(a_{i} | x_{i})} r_{i} \\\\right] \\\\\\\\\n& = \\\\arg \\\\min_{\\\\pi \\\\in \\\\Pi} \\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[\\\\frac{r_i}{\\\\pi_{b}(a_{i} | x_{i})} \\\\mathbb{I} \\\\{\\\\pi (x_{i}) \\\\neq a_{i} \\\\} \\\\right],\nwhere :math:`\\\\mathbb{E}_{\\\\mathcal{D}} [\\cdot]` is the empirical average over observations in :math:`\\\\mathcal{D}`.\n@@ -149,8 +150,8 @@ class IPWLearner(BaseOffPolicyLearner):\nposition = np.zeros_like(action, dtype=int)\nelse:\nassert (\n- position is not None\n- ), \"position has to be set when len_list is larger than 1\"\n+ isinstance(position, np.ndarray) and position.ndim == 1\n+ ), f\"when len_list > 1, position must be a 1-dimensional ndarray\"\nfor position_ in np.arange(self.len_list):\nX, sample_weight, y = self._create_train_data_for_opl(\n@@ -237,18 +238,18 @@ class IPWLearner(BaseOffPolicyLearner):\nNote\n--------\n- `sample_action` samples a **non-repetitive** set of actions for new data :math:`x \\\\in \\\\mathcal{X}`\n+ This `sample_action` method samples a **non-repetitive** set of actions for new data :math:`x \\\\in \\\\mathcal{X}`\nby first computing non-negative scores for all possible candidate products of action and position\n:math:`(a, k) \\\\in \\\\mathcal{A} \\\\times \\\\mathcal{K}` (where :math:`\\\\mathcal{A}` is an action set and\n- :math:`\\\\mathcal{K}` is a position set), and using a Plackett-Luce ranking model as follows:\n+ :math:`\\\\mathcal{K}` is a position set), and using softmax function as follows:\n.. 
math::\n- & P (A_1 = a_1) = \\\\frac{e^{f(x,a_1,1) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} e^{f(x,a^{\\\\prime},1) / \\\\tau}} , \\\\\\\\\n- & P (A_2 = a_2 | A_1 = a_1) = \\\\frac{e^{f(x,a_2,2) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A} \\\\backslash \\\\{a_1\\\\}} e^{f(x,a^{\\\\prime},2) / \\\\tau}} ,\n+ & P (A_1 = a_1 | x) = \\\\frac{e^{f(x,a_1,1) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} e^{f(x,a^{\\\\prime},1) / \\\\tau}} , \\\\\\\\\n+ & P (A_2 = a_2 | A_1 = a_1, x) = \\\\frac{e^{f(x,a_2,2) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A} \\\\backslash \\\\{a_1\\\\}} e^{f(x,a^{\\\\prime},2) / \\\\tau}} ,\n\\\\ldots\n- where :math:`A_k` is a random variable representing the action at a position :math:`k`.\n+ where :math:`A_k` is a random variable representing an action at a position :math:`k`.\n:math:`\\\\tau` is a temperature hyperparameter.\n:math:`f: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\times \\\\mathcal{K} \\\\rightarrow \\\\mathbb{R}_{+}`\nis a scoring function which is now implemented in the `predict_score` method.\n@@ -280,7 +281,7 @@ class IPWLearner(BaseOffPolicyLearner):\nrandom_ = check_random_state(random_state)\naction = np.zeros((n_rounds, self.n_actions, self.len_list))\nscore_predicted = self.predict_score(context=context)\n- for i in np.arange(n_rounds):\n+ for i in tqdm(np.arange(n_rounds), desc=\"[sample_action]\", total=n_rounds):\naction_set = np.arange(self.n_actions)\nfor position_ in np.arange(self.len_list):\nscore_ = softmax(score_predicted[i, action_set, position_] / tau)\n@@ -289,3 +290,52 @@ class IPWLearner(BaseOffPolicyLearner):\naction_set = np.delete(action_set, action_set == action_sampled)\nreturn action\n+ def predict_proba(\n+ self, context: np.ndarray, tau: Union[int, float] = 1.0,\n+ ) -> np.ndarray:\n+ \"\"\"Obtains action choice probabilities for new data based on scores predicted by a classifier.\n+\n+ Note\n+ --------\n+ This `predict_proba` method obtains action choice probabilities for new data :math:`x \\\\in \\\\mathcal{X}`\n+ by first computing non-negative scores for all possible candidate actions\n+ :math:`a \\\\in \\\\mathcal{A}` (where :math:`\\\\mathcal{A}` is an action set),\n+ and using a Plackett-Luce ranking model as follows:\n+\n+ .. 
math::\n+\n+ P (A = a | x) = \\\\frac{e^{f(x,a) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} e^{f(x,a^{\\\\prime}) / \\\\tau}},\n+\n+ where :math:`A` is a random variable representing an action, and :math:`\\\\tau` is a temperature hyperparameter.\n+ :math:`f: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}_{+}`\n+ is a scoring function which is now implemented in the `predict_score` method.\n+\n+ **Note that this method can be used only when `len_list=1`, please use the `sample_action` method otherwise.**\n+\n+ Parameters\n+ ----------------\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\n+\n+ tau: int or float, default=1.0\n+ A temperature parameter, controlling the randomness of the action choice.\n+ As :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n+\n+ Returns\n+ -----------\n+ choice_prob: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Action choice probabilities obtained by a trained classifier.\n+\n+ \"\"\"\n+ assert (\n+ self.len_list == 1\n+ ), f\"predict_proba method can be used only when len_list = 1\"\n+ assert (\n+ isinstance(context, np.ndarray) and context.ndim == 2\n+ ), \"context must be 2-dimensional ndarray\"\n+ check_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\n+\n+ score_predicted = self.predict_score(context=context)\n+ choice_prob = softmax(score_predicted / tau, axis=1)\n+ return choice_prob\n+\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add predict_proba method along with some refactoring
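The new predict_proba method in the diff above turns classifier scores into a softmax distribution controlled by a temperature tau. The standalone snippet below reproduces just that transformation with fabricated scores, showing how tau moves the resulting policy between near-deterministic and near-uniform.

import numpy as np
from scipy.special import softmax

# hypothetical scores f(x, a) for two rounds and three actions
score_predicted = np.array([[2.0, 1.0, 0.0],
                            [0.5, 0.5, 3.0]])

for tau in (0.1, 1.0, 10.0):
    choice_prob = softmax(score_predicted / tau, axis=1)
    print(f"tau={tau}:", np.round(choice_prob, 3))
# small tau -> almost deterministic choices; large tau -> close to uniform over actions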
641,014
11.11.2020 01:06:55
-32,400
2cac0170ac846fcd24621c8fcd1d289bc8366220
fix output of obtain_batch_bandit_feedback and sample_bootstrap_bandit_feedback of OpenBanditDataset
[ { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "\"\"\"Dataset Class for Real-World Logged Bandit Feedback.\"\"\"\nfrom dataclasses import dataclass\nfrom pathlib import Path\n-from typing import Optional\n+from typing import Optional, Tuple, Union\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import rankdata\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.utils import check_random_state\n+\nfrom .base import BaseRealBanditDataset\nfrom ..types import BanditFeedback\n@@ -21,13 +22,13 @@ class OpenBanditDataset(BaseRealBanditDataset):\nNote\n-----\n- Users are free to implement their own feature engineering by overriding `pre_process` method.\n+ Users are free to implement their own feature engineering by overriding the `pre_process` method.\nParameters\n-----------\nbehavior_policy: str\nName of the behavior policy that generated the logged bandit feedback data.\n- Must be 'random' or 'bts'.\n+ Must be either 'random' or 'bts'.\ncampaign: str\nOne of the three possible campaigns considered in ZOZOTOWN, \"all\", \"men\", and \"women\".\n@@ -61,7 +62,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\n\"men\",\n\"women\",\n], f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\n- assert isinstance(self.data_path, Path), f\"data_path must be a Path\"\n+ assert isinstance(self.data_path, Path), f\"data_path must be a Path type\"\nself.data_path = self.data_path / self.behavior_policy / self.campaign\nself.raw_data_file = f\"{self.campaign}.csv\"\n@@ -71,7 +72,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\n@property\ndef n_rounds(self) -> int:\n- \"\"\"Total number of rounds in the logged bandit dataset.\"\"\"\n+ \"\"\"Total number of rounds contained in the logged bandit dataset.\"\"\"\nreturn self.data.shape[0]\n@property\n@@ -81,7 +82,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\n@property\ndef dim_context(self) -> int:\n- \"\"\"Number of dimensions of context vectors.\"\"\"\n+ \"\"\"Dimensions of context vectors.\"\"\"\nreturn self.context.shape[1]\n@property\n@@ -104,7 +105,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\n----------\nbehavior_policy: str\nName of the behavior policy that generated the log data.\n- Must be 'random' or 'bts'.\n+ Must be either 'random' or 'bts'.\ncampaign: str\nOne of the three possible campaigns considered in ZOZOTOWN (i.e., \"all\", \"men\", and \"women\").\n@@ -121,7 +122,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\nReturns\n---------\non_policy_policy_value_estimate: float\n- Estimated on-policy policy value of behavior policy, i.e., :math:`\\\\mathbb{E}_{\\\\mathcal{D}} [r_t]`.\n+ Policy value of the behavior policy estimated by on-policy estimation, i.e., :math:`\\\\mathbb{E}_{\\\\mathcal{D}} [r_t]`.\nwhere :math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\nThis parameter is used as a ground-truth policy value in the evaluation of OPE estimators.\n@@ -170,43 +171,52 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndef obtain_batch_bandit_feedback(\nself, test_size: float = 0.3, is_timeseries_split: bool = False\n- ) -> BanditFeedback:\n+ ) -> Union[BanditFeedback, Tuple[BanditFeedback, BanditFeedback]]:\n\"\"\"Obtain batch logged bandit feedback.\nParameters\n-----------\ntest_size: float, default=0.3\n- If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n+ If 
float, should be between 0.0 and 1.0 and represent the proportion of\n+ the dataset to include in the evaluation split.\nis_timeseries_split: bool, default=False\nIf true, split the original logged badnit feedback data by time series.\nReturns\n--------\n- bandit_feedback: BanditFeedback\n- Batch logged bandit feedback collected by the behavior policy.\n+ bandit_feedback: tuple or BanditFeedback\n+ Batch logged bandit feedback collected by a behavior policy.\n+ When `is_timeseries_split` is true, this method returns a tuple of\n+ train and evaluation sets of bandit feedback, (bandit_feedback_train, bandit_feedback_eval)\n\"\"\"\nif is_timeseries_split:\nassert isinstance(test_size, float) & (\n0 < test_size < 1\n- ), f\"test_size must be a float between 0 and 1, but {test_size} is given\"\n+ ), f\"test_size must be a float in the (0,1) interval, but {test_size} is given\"\nn_rounds_train = np.int(self.n_rounds * (1.0 - test_size))\n- return dict(\n+ bandit_feedback_train = dict(\nn_rounds=n_rounds_train,\nn_actions=self.n_actions,\naction=self.action[:n_rounds_train],\n- action_test=self.action[n_rounds_train:],\nposition=self.position[:n_rounds_train],\n- position_test=self.position[n_rounds_train:],\nreward=self.reward[:n_rounds_train],\n- reward_test=self.reward[n_rounds_train:],\npscore=self.pscore[:n_rounds_train],\n- pscore_test=self.pscore[n_rounds_train:],\ncontext=self.context[:n_rounds_train],\n- context_test=self.context[n_rounds_train:],\naction_context=self.action_context,\n)\n+ bandit_feedback_eval = dict(\n+ n_rounds=np.int(self.n_rounds - n_rounds_train),\n+ n_actions=self.n_actions,\n+ action=self.action[n_rounds_train:],\n+ position=self.position[n_rounds_train:],\n+ reward=self.reward[n_rounds_train:],\n+ pscore=self.pscore[n_rounds_train:],\n+ context=self.context[n_rounds_train:],\n+ action_context=self.action_context,\n+ )\n+ return bandit_feedback_train, bandit_feedback_eval\nelse:\nreturn dict(\nn_rounds=self.n_rounds,\n@@ -225,32 +235,50 @@ class OpenBanditDataset(BaseRealBanditDataset):\ntest_size: float = 0.3,\nis_timeseries_split: bool = False,\nrandom_state: Optional[int] = None,\n- ) -> BanditFeedback:\n- \"\"\"Sample bootstrap logged bandit feedback.\n+ ) -> Union[BanditFeedback, Tuple[BanditFeedback, BanditFeedback]]:\n+ \"\"\"Obtain bootstrap logged bandit feedback.\nParameters\n-----------\ntest_size: float, default=0.3\n- If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n+ If float, should be between 0.0 and 1.0 and represent the proportion of\n+ the dataset to include in the evaluation split.\nis_timeseries_split: bool, default=False\nIf true, split the original logged badnit feedback data by time series.\nrandom_state: int, default=None\n- Controls the random seed in sampling logged bandit dataset.\n+ Controls the random seed in bootstrap sampling.\nReturns\n--------\n- bootstrap_bandit_feedback: BanditFeedback\n- Bootstrapped logged bandit feedback independently sampled from the original data with replacement.\n+ bandit_feedback: BanditFeedback\n+ Logged bandit feedback sampled independently from the original data with replacement.\n+ When `is_timeseries_split` is true, this method returns a tuple of\n+ train and evaluation sets of bandit feedback, (bandit_feedback_train, bandit_feedback_eval)\n+ where the train set is sampled independently from the original train data with replacement.\n\"\"\"\n+ if is_timeseries_split:\n+ (\n+ bandit_feedback_train,\n+ bandit_feedback_eval,\n+ ) = 
self.obtain_batch_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=is_timeseries_split\n+ )\n+ n_rounds = bandit_feedback_train[\"n_rounds\"]\n+ random_ = check_random_state(random_state)\n+ bootstrap_idx = random_.choice(n_rounds, size=n_rounds, replace=True)\n+ for key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\n+ bandit_feedback_train[key_] = bandit_feedback_train[key_][bootstrap_idx]\n+ return bandit_feedback_train, bandit_feedback_eval\n+ else:\nbandit_feedback = self.obtain_batch_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split\n)\nn_rounds = bandit_feedback[\"n_rounds\"]\nrandom_ = check_random_state(random_state)\n- bootstrap_idx = random_.choice(np.arange(n_rounds), size=n_rounds, replace=True)\n+ bootstrap_idx = random_.choice(n_rounds, size=n_rounds, replace=True)\nfor key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\nbandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]\nreturn bandit_feedback\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix output of obtain_batch_bandit_feedabck and sample_bootstrap_bandit_feedback of OpenBanditDataset
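As of this record, obtain_batch_bandit_feedback with is_timeseries_split=True returns a (train, eval) tuple; note that the very next record reverts this to a single dictionary with *_test keys. A hedged usage sketch of this intermediate API; it assumes the `obd/random/all` sample data bundled with the repository is available under the working directory, and that OpenBanditDataset is importable from obp.dataset.

from pathlib import Path
from obp.dataset import OpenBanditDataset  # assumed import path

# assumes ./obd/random/all exists (the small sample of the dataset shipped with the repo)
dataset = OpenBanditDataset(
    behavior_policy="random", campaign="all", data_path=Path("./obd")
)
bandit_feedback_train, bandit_feedback_eval = dataset.obtain_batch_bandit_feedback(
    test_size=0.3, is_timeseries_split=True
)
print(bandit_feedback_train["n_rounds"], bandit_feedback_eval["n_rounds"])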
641,014
11.11.2020 14:19:42
-32,400
55d28f607e56dc1603fdcb8cbb13a5d2573d9c21
update output of obtain_batch_bandit_feedback of OpenBanditDataset
[ { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "\"\"\"Dataset Class for Real-World Logged Bandit Feedback.\"\"\"\nfrom dataclasses import dataclass\nfrom pathlib import Path\n-from typing import Optional, Tuple, Union\n+from typing import Optional, Tuple\nimport numpy as np\nimport pandas as pd\n@@ -171,7 +171,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndef obtain_batch_bandit_feedback(\nself, test_size: float = 0.3, is_timeseries_split: bool = False\n- ) -> Union[BanditFeedback, Tuple[BanditFeedback, BanditFeedback]]:\n+ ) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\nParameters\n@@ -185,10 +185,8 @@ class OpenBanditDataset(BaseRealBanditDataset):\nReturns\n--------\n- bandit_feedback: tuple or BanditFeedback\n+ bandit_feedback: BanditFeedback\nBatch logged bandit feedback collected by a behavior policy.\n- When `is_timeseries_split` is true, this method returns a tuple of\n- train and evaluation sets of bandit feedback, (bandit_feedback_train, bandit_feedback_eval)\n\"\"\"\nif is_timeseries_split:\n@@ -196,27 +194,21 @@ class OpenBanditDataset(BaseRealBanditDataset):\n0 < test_size < 1\n), f\"test_size must be a float in the (0,1) interval, but {test_size} is given\"\nn_rounds_train = np.int(self.n_rounds * (1.0 - test_size))\n- bandit_feedback_train = dict(\n+ return dict(\nn_rounds=n_rounds_train,\nn_actions=self.n_actions,\naction=self.action[:n_rounds_train],\n+ action_test=self.action[n_rounds_train:],\nposition=self.position[:n_rounds_train],\n+ position_test=self.position[n_rounds_train:],\nreward=self.reward[:n_rounds_train],\n+ reward_test=self.reward[n_rounds_train:],\npscore=self.pscore[:n_rounds_train],\n+ pscore_test=self.pscore[n_rounds_train:],\ncontext=self.context[:n_rounds_train],\n+ context_test=self.context[n_rounds_train:],\naction_context=self.action_context,\n)\n- bandit_feedback_eval = dict(\n- n_rounds=np.int(self.n_rounds - n_rounds_train),\n- n_actions=self.n_actions,\n- action=self.action[n_rounds_train:],\n- position=self.position[n_rounds_train:],\n- reward=self.reward[n_rounds_train:],\n- pscore=self.pscore[n_rounds_train:],\n- context=self.context[n_rounds_train:],\n- action_context=self.action_context,\n- )\n- return bandit_feedback_train, bandit_feedback_eval\nelse:\nreturn dict(\nn_rounds=self.n_rounds,\n@@ -235,7 +227,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\ntest_size: float = 0.3,\nis_timeseries_split: bool = False,\nrandom_state: Optional[int] = None,\n- ) -> Union[BanditFeedback, Tuple[BanditFeedback, BanditFeedback]]:\n+ ) -> BanditFeedback:\n\"\"\"Obtain bootstrap logged bandit feedback.\nParameters\n@@ -254,31 +246,14 @@ class OpenBanditDataset(BaseRealBanditDataset):\n--------\nbandit_feedback: BanditFeedback\nLogged bandit feedback sampled independently from the original data with replacement.\n- When `is_timeseries_split` is true, this method returns a tuple of\n- train and evaluation sets of bandit feedback, (bandit_feedback_train, bandit_feedback_eval)\n- where the train set is sampled independently from the original train data with replacement.\n\"\"\"\n- if is_timeseries_split:\n- (\n- bandit_feedback_train,\n- bandit_feedback_eval,\n- ) = self.obtain_batch_bandit_feedback(\n- test_size=test_size, is_timeseries_split=is_timeseries_split\n- )\n- n_rounds = bandit_feedback_train[\"n_rounds\"]\n- random_ = check_random_state(random_state)\n- bootstrap_idx = random_.choice(n_rounds, size=n_rounds, replace=True)\n- for key_ in [\"action\", 
\"position\", \"reward\", \"pscore\", \"context\"]:\n- bandit_feedback_train[key_] = bandit_feedback_train[key_][bootstrap_idx]\n- return bandit_feedback_train, bandit_feedback_eval\n- else:\nbandit_feedback = self.obtain_batch_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split\n)\nn_rounds = bandit_feedback[\"n_rounds\"]\nrandom_ = check_random_state(random_state)\n- bootstrap_idx = random_.choice(n_rounds, size=n_rounds, replace=True)\n+ bootstrap_idx = random_.choice(np.arange(n_rounds), size=n_rounds, replace=True)\nfor key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\nbandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]\nreturn bandit_feedback\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update output of obtain_batch_bandit_feedback of OpenBanditDataset
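The bootstrap step retained in sample_bootstrap_bandit_feedback boils down to resampling round indices with replacement and re-indexing every per-round field. A self-contained sketch of just that step, with fabricated stand-in fields:

import numpy as np
from sklearn.utils import check_random_state

n_rounds = 8
random_ = check_random_state(12345)
bootstrap_idx = random_.choice(np.arange(n_rounds), size=n_rounds, replace=True)

# stand-ins for the per-round fields of a BanditFeedback dict
bandit_feedback = {"action": np.arange(n_rounds), "reward": np.ones(n_rounds)}
for key_ in ("action", "reward"):
    bandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]
print(bootstrap_idx, bandit_feedback["action"])  # identical by construction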
641,014
11.11.2020 14:20:06
-32,400
286c4026fa5fe5ec0efb57e4adf0d4332eba587e
update comments in the benchmarking codes
[ { "change_type": "MODIFY", "old_path": "benchmark/ope/benchmark_off_policy_estimators.py", "new_path": "benchmark/ope/benchmark_off_policy_estimators.py", "diff": "@@ -17,23 +17,16 @@ from obp.ope import (\nDoublyRobust,\nSelfNormalizedDoublyRobust,\nSwitchDoublyRobust,\n- SwitchInverseProbabilityWeighting,\nDoublyRobustWithShrinkage,\n)\n-# compared OPE estimators\n+# OPE estimators compared\nope_estimators = [\nDirectMethod(),\nInverseProbabilityWeighting(),\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchInverseProbabilityWeighting(tau=5, estimator_name=\"switch-ipw (tau=5)\"),\n- SwitchInverseProbabilityWeighting(tau=10, estimator_name=\"switch-ipw (tau=10)\"),\n- SwitchInverseProbabilityWeighting(tau=50, estimator_name=\"switch-ipw (tau=50)\"),\n- SwitchInverseProbabilityWeighting(tau=100, estimator_name=\"switch-ipw (tau=100)\"),\n- SwitchInverseProbabilityWeighting(tau=500, estimator_name=\"switch-ipw (tau=500)\"),\n- SwitchInverseProbabilityWeighting(tau=1000, estimator_name=\"switch-ipw (tau=1000)\"),\nSwitchDoublyRobust(tau=5, estimator_name=\"switch-dr (tau=5)\"),\nSwitchDoublyRobust(tau=10, estimator_name=\"switch-dr (tau=10)\"),\nSwitchDoublyRobust(tau=50, estimator_name=\"switch-dr (tau=50)\"),\n@@ -51,10 +44,7 @@ ope_estimators = [\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\nparser.add_argument(\n- \"--n_runs\",\n- type=int,\n- default=1,\n- help=\"number of bootstrap sampling in the experiment.\",\n+ \"--n_runs\", type=int, default=1, help=\"number of experimental runs.\",\n)\nparser.add_argument(\n\"--base_model\",\n" }, { "change_type": "MODIFY", "old_path": "benchmark/ope/train_regression_model.py", "new_path": "benchmark/ope/train_regression_model.py", "diff": "@@ -71,10 +71,7 @@ def evaluate_reg_model(\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(description=\"evaluate off-policy estimators.\")\nparser.add_argument(\n- \"--n_runs\",\n- type=int,\n- default=1,\n- help=\"number of bootstrap sampling in the experiment.\",\n+ \"--n_runs\", type=int, default=1, help=\"number of experimental runs.\",\n)\nparser.add_argument(\n\"--base_model\",\n@@ -179,7 +176,7 @@ if __name__ == \"__main__\":\n)\ndef process(b: int):\n- # sample bootstrap samples from batch logged bandit feedback\n+ # sample bootstrap from batch logged bandit feedback\nbandit_feedback = obd.sample_bootstrap_bandit_feedback(\ntest_size=test_size,\nis_timeseries_split=is_timeseries_split,\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update comments in the benchmarking codes
641,014
13.11.2020 03:40:41
-32,400
425cbd921bb3dc946ed903b7a6dcd6df46354e94
fix the definition of sndr
[ { "change_type": "MODIFY", "old_path": "docs/estimators.rst", "new_path": "docs/estimators.rst", "diff": "@@ -57,7 +57,7 @@ IPW does not have these properties.\nWe can define Self-Normalized Doubly Robust (SNDR) in a similar manner as follows.\n.. math::\n- \\hat{V}_{\\mathrm{SNDR}} (\\pi_e; \\calD) :=\\frac{\\E_{\\calD} [\\hat{q}(x_t, \\pi_e) + w(x_t,a_t) (r_t-\\hat{q}(x_t, a_t) ) ]}{\\E_{\\calD} [ w(x_t,a_t) ]}.\n+ \\hat{V}_{\\mathrm{SNDR}} (\\pi_e; \\calD) := \\E_{\\calD} \\left[\\hat{q}(x_t, \\pi_e) + \\frac{w(x_t,a_t) (r_t-\\hat{q}(x_t, a_t) )}{\\E_{\\calD} [ w(x_t,a_t) ]} \\right].\nSwitch Estimators\n" }, { "change_type": "MODIFY", "old_path": "obp/ope/estimators.py", "new_path": "obp/ope/estimators.py", "diff": "@@ -815,7 +815,7 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\n.. math::\n\\\\hat{V}_{\\\\mathrm{SNDR}} (\\\\pi_e; \\\\mathcal{D}, \\\\hat{q}) :=\n- \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\hat{q}(x_t,\\\\pi_e) + w(x_t,a_t) (r_t - \\\\hat{q}(x_t,a_t))]}{\\\\mathbb{E}_{\\\\mathcal{D}}[ w(x_t,a_t) ]},\n+ \\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[\\\\hat{q}(x_t,\\\\pi_e) + \\\\frac{w(x_t,a_t) (r_t - \\\\hat{q}(x_t,a_t))}{\\\\mathbb{E}_{\\\\mathcal{D}}[ w(x_t,a_t) ]} \\\\right],\nwhere :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by\na behavior policy :math:`\\\\pi_b`. :math:`w(x,a):=\\\\pi_e (a|x)/\\\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.\n@@ -894,8 +894,8 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\nq_hat_factual = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), action, position\n]\n- estimated_rewards += iw * (reward - q_hat_factual)\n- return estimated_rewards / iw.mean()\n+ estimated_rewards += iw * (reward - q_hat_factual) / iw.mean()\n+ return estimated_rewards\n@dataclass\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix the definition of sndr
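The fix above changes where self-normalization enters SNDR: only the importance-weighted correction term is divided by the empirical mean weight, not the baseline term q-hat(x_t, pi_e). A small numeric sketch with fabricated per-round values contrasting the corrected form against the previous one:

import numpy as np

# fabricated per-round quantities, for illustrating the corrected SNDR formula only
iw = np.array([0.5, 2.0, 1.0, 1.5])              # importance weights w(x_t, a_t)
reward = np.array([1.0, 0.0, 1.0, 0.0])
q_hat_factual = np.array([0.6, 0.4, 0.7, 0.3])   # q-hat(x_t, a_t)
q_hat_pi_e = np.array([0.5, 0.5, 0.6, 0.4])      # q-hat(x_t, pi_e)

# corrected form: normalize only the correction term
sndr_new = np.mean(q_hat_pi_e + iw * (reward - q_hat_factual) / iw.mean())
# previous form: the whole estimate was divided by the mean importance weight
sndr_old = np.mean(q_hat_pi_e + iw * (reward - q_hat_factual)) / iw.mean()
print(sndr_new, sndr_old)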
641,014
13.11.2020 03:40:55
-32,400
8ad89a2056b8f6557ae9612ef8d9fb1cadb426b2
rerun example with new updates
[ { "change_type": "MODIFY", "old_path": "examples/examples_with_synthetic/README.md", "new_path": "examples/examples_with_synthetic/README.md", "diff": "## Description\n-Here, we use synthetic bandit datasets and pipeline to evaluate OPE estimators.\n-Specifically, we evaluate the estimation performances of well-known off-policy estimators using the ground-truth policy value of an evaluation policy, which is calculable with synthetic data.\n+Here, we use synthetic bandit datasets to evaluate OPE estimators.\n+Specifically, we evaluate the estimation performances of well-known off-policy estimators using the ground-truth policy value of an evaluation policy calculable with synthetic data.\n## Evaluating Off-Policy Estimators\n@@ -18,7 +18,7 @@ In the following, we evaluate the estimation performances of\n- Switch Doubly Robust (Switch-DR)\n- Doubly Robust with Optimistic Shrinkage (DRos)\n-For Switch-IPW, Switch-DR, and DRos, we tried some different values of hyperparameters.\n+For Switch-IPW, Switch-DR, and DRos, we try some different values of hyperparameters.\nSee [our documentation](https://zr-obp.readthedocs.io/en/latest/estimators.html) for the details about these estimators.\n[`./evaluate_off_policy_estimators.py`](./evaluate_off_policy_estimators.py) implements the evaluation of OPE estimators using synthetic bandit feedback data.\n@@ -43,7 +43,7 @@ python evaluate_off_policy_estimators.py\\\n- `$base_model_for_reg_model` specifies the base ML model for defining regression model and should be one of \"logistic_regression\", \"random_forest\", or \"lightgbm\".\n- `$n_jobs` is the maximum number of concurrently running jobs.\n-For example, the following command compares the estimation performances (relative estimation error; relative-ee) of the OPE estimators using the synthetic bandit feedback data with 100,000 rounds, 30 actions, context vectors with five dimensions.\n+For example, the following command compares the estimation performances (relative estimation error; relative-ee) of the OPE estimators using the synthetic bandit feedback data with 100,000 rounds, 30 actions, five dimensional context vectors.\n```bash\npython evaluate_off_policy_estimators.py\\\n@@ -57,22 +57,22 @@ python evaluate_off_policy_estimators.py\\\n--random_state 12345\n# relative-ee of OPE estimators and their standard deviations (lower is better).\n-# It appears that the performances of some OPE estimators depend on the choice of hyperparameters.\n+# It appears that the performances of some OPE estimators depend on the choice of their hyperparameters.\n# =============================================\n# random_state=12345\n# ---------------------------------------------\n# mean std\n-# dm 0.010835 0.000693\n-# ipw 0.001764 0.000474\n-# snipw 0.001630 0.001022\n-# dr 0.001265 0.000773\n-# sndr 0.002091 0.000115\n-# switch-ipw (tau=1) 0.138272 0.000630\n-# switch-ipw (tau=100) 0.001764 0.000474\n-# switch-dr (tau=1) 0.021673 0.000507\n-# switch-dr (tau=100) 0.001265 0.000773\n-# dr-os (lambda=1) 0.010676 0.000694\n-# dr-os (lambda=100) 0.001404 0.001083\n+# dm 0.011110 0.000565\n+# ipw 0.001953 0.000387\n+# snipw 0.002036 0.000835\n+# dr 0.001573 0.000631\n+# sndr 0.001578 0.000625\n+# switch-ipw (tau=1) 0.138523 0.000514\n+# switch-ipw (tau=100) 0.001953 0.000387\n+# switch-dr (tau=1) 0.021875 0.000414\n+# switch-dr (tau=100) 0.001573 0.000631\n+# dr-os (lambda=1) 0.010952 0.000567\n+# dr-os (lambda=100) 0.001835 0.000884\n# =============================================\n```\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
rerun example with new updates
641,014
13.11.2020 23:02:09
-32,400
cfc13d26d3a6abce4132da1e7242b0292f5ade35
update docstring of IPWLearner
[ { "change_type": "MODIFY", "old_path": "obp/policy/offline.py", "new_path": "obp/policy/offline.py", "diff": "@@ -245,8 +245,8 @@ class IPWLearner(BaseOfflinePolicyLearner):\n.. math::\n- & P (A_1 = a_1 | x) = \\\\frac{e^{f(x,a_1,1) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} e^{f(x,a^{\\\\prime},1) / \\\\tau}} , \\\\\\\\\n- & P (A_2 = a_2 | A_1 = a_1, x) = \\\\frac{e^{f(x,a_2,2) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A} \\\\backslash \\\\{a_1\\\\}} e^{f(x,a^{\\\\prime},2) / \\\\tau}} ,\n+ & P (A_1 = a_1 | x) = \\\\frac{\\\\mathrm{exp}(f(x,a_1,1) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} \\\\mathrm{exp}( f(x,a^{\\\\prime},1) / \\\\tau)} , \\\\\\\\\n+ & P (A_2 = a_2 | A_1 = a_1, x) = \\\\frac{\\\\mathrm{exp}(f(x,a_2,2) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A} \\\\backslash \\\\{a_1\\\\}} \\\\mathrm{exp}(f(x,a^{\\\\prime},2) / \\\\tau )} ,\n\\\\ldots\nwhere :math:`A_k` is a random variable representing an action at a position :math:`k`.\n@@ -304,7 +304,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\n.. math::\n- P (A = a | x) = \\\\frac{e^{f(x,a) / \\\\tau}}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} e^{f(x,a^{\\\\prime}) / \\\\tau}},\n+ P (A = a | x) = \\\\frac{\\\\mathrm{exp}(f(x,a) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} \\\\mathrm{exp}(f(x,a^{\\\\prime}) / \\\\tau)},\nwhere :math:`A` is a random variable representing an action, and :math:`\\\\tau` is a temperature hyperparameter.\n:math:`f: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}_{+}`\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update docstring of IPWLeaner
641,003
29.11.2020 02:23:49
-32,400
9efcdec837117b08d90bd13cb23fbb2ebbd1f383
complement the advance preparation for quickstart
[ { "change_type": "MODIFY", "old_path": "docs/quickstart.rst", "new_path": "docs/quickstart.rst", "diff": "@@ -5,6 +5,8 @@ Quickstart\nWe show an example of conducting offline evaluation of the performance of Bernoulli Thompson Sampling (BernoulliTS) as an evaluation policy using *Inverse Probability Weighting (IPW)*\nand logged bandit feedback generated by the Random policy (behavior policy).\nWe see that only ten lines of code are sufficient to complete OPE from scratch.\n+In this example, it is assumed that the `obd/random/all` directory exists under the present working directory.\n+Please clone `the repository <https://github.com/st-tech/zr-obp>`_ in advance.\n.. code-block:: python\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
complement the advance preparation for quickstart
641,003
01.12.2020 20:53:12
-32,400
e8cdc0ade3d0dad5f987f1c424063d05be53664e
add description for contribution to README
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -307,6 +307,10 @@ If you are interested in the Open Bandit Project, we can follow the updates at i\n# Contact\nFor any question about the paper, data, and pipeline, feel free to contact: [email protected]\n+# Contribution\n+Thank you for considering contribution to Open Bandit Project!\n+Please find [CONTRIBUTING.md](./CONTRIBUTING.md) for general guidelines how to contribute to the project.\n+\n# License\nThis project is licensed under the Apache 2.0 License - see the [LICENSE](LICENSE) file for details.\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add description for contribution to README
641,012
09.12.2020 11:38:09
18,000
7592b54d6f723f475084d17e5648d88ae50365d6
move policy_name into post_init
[ { "change_type": "MODIFY", "old_path": "obp/policy/contextfree.py", "new_path": "obp/policy/contextfree.py", "diff": "@@ -49,13 +49,14 @@ class EpsilonGreedy(BaseContextFreePolicy):\n\"\"\"\nepsilon: float = 1.0\n- policy_name: str = f\"egreedy_{epsilon}\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nassert (\n0 <= self.epsilon <= 1\n), f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ self.policy_name = f\"egreedy_{self.epsilon}\"\n+\nsuper().__post_init__()\ndef select_action(self) -> np.ndarray:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
move policy_name into post_init
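The change above matters because a class-level default such as `policy_name: str = f"egreedy_{epsilon}"` is evaluated once at class-definition time with the default epsilon, so every instance would carry the same name. A toy stand-in (not the real obp class) showing the __post_init__ approach picking up the per-instance value:

from dataclasses import dataclass

@dataclass
class ToyEpsilonGreedy:  # simplified stand-in, not obp's class
    epsilon: float = 1.0

    def __post_init__(self) -> None:
        # evaluated per instance, so the name reflects the epsilon actually passed in
        self.policy_name = f"egreedy_{self.epsilon}"

print(ToyEpsilonGreedy(epsilon=0.1).policy_name)  # egreedy_0.1
print(ToyEpsilonGreedy().policy_name)             # egreedy_1.0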
641,014
12.12.2020 02:42:17
-32,400
438a4dbaf025e321396f2aeada4a5dd6489cff11
use n_rounds instead of n_samples in multiclass.py
[ { "change_type": "MODIFY", "old_path": "obp/dataset/multiclass.py", "new_path": "obp/dataset/multiclass.py", "diff": "@@ -50,11 +50,11 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nParameters\n-----------\n- X: array-like, shape (n_samples,n_features)\n+ X: array-like, shape (n_rounds,n_features)\nTraining vector of the original multi-class classification data,\n- where n_samples is the number of samples and n_features is the number of features.\n+ where n_rounds is the number of samples and n_features is the number of features.\n- y: array-like, shape (n_samples,)\n+ y: array-like, shape (n_rounds,)\nTarget vector (relative to X) of the original multi-class classification data.\nbase_classifier_b: ClassifierMixin\n@@ -91,21 +91,21 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\ndataset_name=\"digits\",\n)\n# split the original data into the training and evaluation sets\n- >>> dataset.split_train_eval(eval_size=0.7)\n+ >>> dataset.split_train_eval(eval_size=0.7, random_state=12345)\n# obtain logged bandit feedback generated by behavior policy\n>>> bandit_feedback = dataset.obtain_batch_bandit_feedback(random_state=12345)\n>>> bandit_feedback\n{\n'n_actions': 10,\n- 'n_samples': 1258,\n- 'context': array([[ 0., 0., 1., ..., 6., 0., 0.],\n- [ 0., 1., 14., ..., 10., 1., 0.],\n- [ 0., 0., 7., ..., 3., 0., 0.],\n+ 'n_rounds': 1258,\n+ 'context': array([[ 0., 0., 0., ..., 16., 1., 0.],\n+ [ 0., 0., 7., ..., 16., 3., 0.],\n+ [ 0., 0., 12., ..., 8., 0., 0.],\n...,\n- [ 0., 0., 9., ..., 5., 0., 0.],\n- [ 0., 2., 15., ..., 16., 13., 0.],\n- [ 0., 0., 3., ..., 1., 0., 0.]]),\n- 'action': array([6, 9, 0, ..., 2, 2, 5]),\n+ [ 0., 1., 13., ..., 8., 11., 1.],\n+ [ 0., 0., 15., ..., 0., 0., 0.],\n+ [ 0., 0., 4., ..., 15., 3., 0.]]),\n+ 'action': array([6, 8, 5, ..., 2, 5, 9]),\n'reward': array([1., 1., 1., ..., 1., 1., 1.]),\n'position': array([0, 0, 0, ..., 0, 0, 0]),\n'pscore': array([0.82, 0.82, 0.82, ..., 0.82, 0.82, 0.82])\n@@ -118,13 +118,13 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\n)\n>>> ground_truth = dataset.calc_ground_truth_policy_value(action_dist=action_dist)\n>>> ground_truth\n- 0.8599205087440379\n+ 0.865643879173291\n# off-policy evaluation using IPW\n>>> ope = OffPolicyEvaluation(bandit_feedback=bandit_feedback, ope_estimators=[IPW()])\n>>> estimated_policy_value = ope.estimate_policy_values(action_dist=action_dist)\n>>> estimated_policy_value\n- {'ipw': 0.85877699794486}\n+ {'ipw': 0.8662705029276045}\n# evaluate the estimation performance (accuracy) of IPW by relative estimation error (relative-ee)\n>>> relative_estimation_errors = ope.evaluate_performance_of_estimators(\n@@ -132,7 +132,7 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\naction_dist=action_dist,\n)\n>>> relative_estimation_errors\n- {'ipw': 0.0020047333605093458}\n+ {'ipw': 0.000723881690137968}\nReferences\n------------\n@@ -158,8 +158,8 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nself.X, y = check_X_y(X=self.X, y=self.y, ensure_2d=True, multi_output=False)\nself.y = (rankdata(y, \"dense\") - 1).astype(int) # re-index action\n# fully observed labels\n- self.y_full = np.zeros((self.n_samples, self.n_actions))\n- self.y_full[np.arange(self.n_samples), y] = 1\n+ self.y_full = np.zeros((self.n_rounds, self.n_actions))\n+ self.y_full[np.arange(self.n_rounds), y] = 1\n@property\ndef len_list(self) -> int:\n@@ -172,14 +172,12 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nreturn 
np.unique(self.y).shape[0]\n@property\n- def n_samples(self) -> int:\n+ def n_rounds(self) -> int:\n\"\"\"Number of samples in the original multi-class classification data.\"\"\"\nreturn self.y.shape[0]\ndef split_train_eval(\n- self,\n- eval_size: Union[int, float] = 0.25,\n- random_state: Optional[int] = None,\n+ self, eval_size: Union[int, float] = 0.25, random_state: Optional[int] = None,\n) -> None:\n\"\"\"Split the original data into the training (used for policy learning) and evaluation (used for OPE) sets.\n@@ -203,11 +201,10 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\n) = train_test_split(\nself.X, self.y, self.y_full, test_size=eval_size, random_state=random_state\n)\n- self.n_samples_ev = self.X_ev.shape[0]\n+ self.n_rounds_ev = self.X_ev.shape[0]\ndef obtain_batch_bandit_feedback(\n- self,\n- random_state: Optional[int] = None,\n+ self, random_state: Optional[int] = None,\n) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback, an evaluation policy, and its ground-truth policy value.\n@@ -236,27 +233,27 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nbase_clf_b.fit(X=self.X_tr, y=self.y_tr)\npreds = base_clf_b.predict(self.X_ev).astype(int)\n# construct a behavior policy\n- pi_b = np.zeros((self.n_samples_ev, self.n_actions))\n+ pi_b = np.zeros((self.n_rounds_ev, self.n_actions))\npi_b[:, :] = (1.0 - self.alpha_b) / self.n_actions\n- pi_b[np.arange(self.n_samples_ev), preds] = (\n+ pi_b[np.arange(self.n_rounds_ev), preds] = (\nself.alpha_b + (1.0 - self.alpha_b) / self.n_actions\n)\n# sample action and factual reward based on the behavior policy\n- action = np.zeros(self.n_samples_ev, dtype=int)\n+ action = np.zeros(self.n_rounds_ev, dtype=int)\nfor i, p in enumerate(pi_b):\naction[i] = random_.choice(\nnp.arange(self.n_actions, dtype=int), p=p, replace=False\n)\n- reward = self.y_full_ev[np.arange(self.n_samples_ev), action]\n+ reward = self.y_full_ev[np.arange(self.n_rounds_ev), action]\nreturn dict(\nn_actions=self.n_actions,\n- n_samples=self.n_samples_ev,\n+ n_rounds=self.n_rounds_ev,\ncontext=self.X_ev,\naction=action,\nreward=reward,\n- position=np.zeros(self.n_samples_ev, dtype=int),\n- pscore=pi_b[np.arange(self.n_samples_ev), action],\n+ position=np.zeros(self.n_rounds_ev, dtype=int),\n+ pscore=pi_b[np.arange(self.n_rounds_ev), action],\n)\ndef obtain_action_dist_by_eval_policy(\n@@ -275,9 +272,9 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nReturns\n---------\n- action_dist_by_eval_policy: array-like, shape (n_samples_ev, n_actions, 1)\n+ action_dist_by_eval_policy: array-like, shape (n_rounds_ev, n_actions, 1)\naction_dist_by_eval_policy is an action choice probabilities by an evaluation policy.\n- where n_samples_ev is the number of samples in the evaluation set given the current train-eval split.\n+ where n_rounds_ev is the number of samples in the evaluation set given the current train-eval split.\nn_actions is the number of actions.\naxis 2 represents the length of list; it is always 1 in the current implementation.\n@@ -296,9 +293,9 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nbase_clf_e.fit(X=self.X_tr, y=self.y_tr)\npreds = base_clf_e.predict(self.X_ev).astype(int)\n# construct an evaluation policy\n- pi_e = np.zeros((self.n_samples_ev, self.n_actions))\n+ pi_e = np.zeros((self.n_rounds_ev, self.n_actions))\npi_e[:, :] = (1.0 - alpha_e) / self.n_actions\n- pi_e[np.arange(self.n_samples_ev), preds] = (\n+ pi_e[np.arange(self.n_rounds_ev), preds] = (\nalpha_e + (1.0 - 
alpha_e) / self.n_actions\n)\nreturn np.expand_dims(pi_e, 2)\n@@ -308,9 +305,9 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nParameters\n----------\n- action_dist: array-like, shape (n_samples_ev, n_actions, 1)\n+ action_dist: array-like, shape (n_rounds_ev, n_actions, 1)\nAction distribution or action choice probabilities of a policy whose ground-truth is to be caliculated here.\n- where n_samples_ev is the number of samples in the evaluation set given the current train-eval split.\n+ where n_rounds_ev is the number of samples in the evaluation set given the current train-eval split.\nn_actions is the number of actions.\naxis 2 of action_dist represents the length of list; it is always 1 in the current implementation.\n@@ -324,6 +321,6 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\naction_dist, np.ndarray\n), f\"action_dist must be a 3-D np.ndarray\"\nassert (\n- action_dist.shape[0] == self.n_samples_ev\n+ action_dist.shape[0] == self.n_rounds_ev\n), \"the size of axis 0 of action_dist must be the same as the number of samples in the evaluation set\"\n- return action_dist[np.arange(self.n_samples_ev), self.y_ev].mean()\n+ return action_dist[np.arange(self.n_rounds_ev), self.y_ev].mean()\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
use n_rounds instead of n_samples in multiclass.py
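The class in the diff above converts a multi-class classifier into a bandit behavior policy by mixing its deterministic predictions with a uniform distribution via alpha_b. The snippet below reproduces just that construction with fabricated predictions:

import numpy as np

n_rounds_ev, n_actions, alpha_b = 4, 3, 0.8
preds = np.array([0, 2, 1, 2])  # fabricated base-classifier predictions

pi_b = np.full((n_rounds_ev, n_actions), (1.0 - alpha_b) / n_actions)
pi_b[np.arange(n_rounds_ev), preds] = alpha_b + (1.0 - alpha_b) / n_actions
print(pi_b)                 # the predicted class gets most of the probability mass
print(pi_b.sum(axis=1))     # [1. 1. 1. 1.]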
641,006
17.12.2020 00:49:46
-32,400
81e84643c2569256002ef228fe9cb74d457daa61
add ope estimator names in __init__.py
[ { "change_type": "MODIFY", "old_path": "obp/ope/__init__.py", "new_path": "obp/ope/__init__.py", "diff": "from .estimators import *\nfrom .meta import *\nfrom .regression_model import *\n+\n+__all_estimators__ = [\n+ \"ReplayMethod\",\n+ \"InverseProbabilityWeighting\",\n+ \"SelfNormalizedInverseProbabilityWeighting\",\n+ \"DirectMethod\",\n+ \"DoublyRobust\",\n+ \"DoublyRobustWithShrinkage\",\n+ \"SwitchInverseProbabilityWeighting\",\n+ \"SwitchDoublyRobust\",\n+ \"SelfNormalizedDoublyRobust\",\n+]\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add ope estimator names in __init__.py
641,003
20.12.2020 13:58:26
-32,400
84f6849717ee99f681803c15ed2f741f079eb6e4
replace assert with ValueError for epsilon
[ { "change_type": "MODIFY", "old_path": "obp/policy/contextfree.py", "new_path": "obp/policy/contextfree.py", "diff": "@@ -53,9 +53,10 @@ class EpsilonGreedy(BaseContextFreePolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert (\n- 0 <= self.epsilon <= 1\n- ), f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ if not 0 <= self.epsilon <= 1:\n+ raise ValueError(\n+ f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ )\nsuper().__post_init__()\ndef select_action(self) -> np.ndarray:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
replace assert with ValueError for epsilon
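One common motivation for the swap above is that assert statements are stripped when Python runs with -O, whereas an explicit ValueError always fires and is straightforward to test with pytest.raises. A toy stand-in illustrating the pattern:

from dataclasses import dataclass

@dataclass
class ToyPolicy:  # simplified stand-in, not obp's class
    n_actions: int
    epsilon: float = 1.0

    def __post_init__(self) -> None:
        if not 0 <= self.epsilon <= 1:
            # survives `python -O`, unlike an assert
            raise ValueError(
                f"epsilon must be between 0 and 1, but {self.epsilon} is given"
            )

try:
    ToyPolicy(n_actions=2, epsilon=1.2)
except ValueError as e:
    print(e)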
641,004
25.12.2020 20:22:31
-32,400
116753e057b90cbc431932c50a4ece5ad70f399b
docs: refine field descriptions
[ { "change_type": "MODIFY", "old_path": "obd/README.md", "new_path": "obd/README.md", "diff": "@@ -34,7 +34,7 @@ Here is a detailed description of the fields (they are comma-separated in the CS\n- item_id: index of items as arms (index ranges from 0-80 in \"All\" campaign, 0-33 for \"Men\" campaign, and 0-46 \"Women\" campaign).\n- position: the position of an item being recommended (1, 2, or 3 correspond to left, center, and right position of the ZOZOTOWN recommendation interface, respectively).\n- click: target variable that indicates if an item was clicked (1) or not (0).\n-- propensity_score: the probability of an item being recommended at each position.\n+- propensity_score: the probability of an item being recommended at the given position.\n- user feature 0-4: user-related feature values.\n- user-item affinity 0-: user-item affinity scores induced by the number of past clicks observed between each user-item pair.\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
docs: refine filed descriptions
641,003
30.12.2020 03:52:07
-32,400
87eeff107cd3344667c2bb6f8de42e56318b51dd
add test epsilon greedy
[ { "change_type": "ADD", "old_path": null, "new_path": "test/policy/test_contextfree.py", "diff": "+import pytest\n+import numpy as np\n+\n+from obp.policy.contextfree import EpsilonGreedy\n+\n+\n+def test_contextfree_base_exception():\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=0)\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=\"3\")\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=2, len_list=-1)\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=2, len_list=\"5\")\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=2, batch_size=-3)\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=2, batch_size=\"3\")\n+\n+\n+def test_egreedy_normal_epsilon():\n+\n+ policy1 = EpsilonGreedy(n_actions=2)\n+ assert 0 <= policy1.epsilon <= 1\n+\n+ policy2 = EpsilonGreedy(n_actions=3, epsilon=0.3)\n+ assert 0 <= policy2.epsilon <= 1\n+\n+\n+def test_egreedy_abnormal_epsilon():\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=2, epsilon=1.2)\n+\n+ with pytest.raises(ValueError):\n+ EpsilonGreedy(n_actions=5, epsilon=-0.2)\n+\n+\n+def test_egreedy_select_action_exploitation():\n+ trial_num = 50\n+ policy = EpsilonGreedy(n_actions=2, epsilon=0.0)\n+ policy.action_counts = np.array([3, 3])\n+ policy.reward_counts = np.array([3, 0])\n+ for _ in range(trial_num):\n+ assert policy.select_action()[0] == 0\n+\n+\n+def test_egreedy_select_action_exploration():\n+ trial_num = 50\n+ policy = EpsilonGreedy(n_actions=2, epsilon=1.0)\n+ policy.action_counts = np.array([3, 3])\n+ policy.reward_counts = np.array([3, 0])\n+ selected_action = [policy.select_action() for _ in range(trial_num)]\n+ assert 0 < sum(selected_action)[0] < trial_num\n+\n+\n+def test_egreedy_update_params():\n+ policy = EpsilonGreedy(n_actions=2, epsilon=1.0)\n+ policy.action_counts_temp = np.array([4, 3])\n+ policy.action_counts = np.copy(policy.action_counts_temp)\n+ policy.reward_counts_temp = np.array([2.0, 0.0])\n+ policy.reward_counts = np.copy(policy.reward_counts_temp)\n+ action = 0\n+ reward = 1.0\n+ policy.update_params(action, reward)\n+ assert np.array_equal(policy.action_counts, np.array([5, 3]))\n+ next_reward = (2.0 * (5 - 1) / 5) + (reward / 5)\n+ assert np.allclose(policy.reward_counts, np.array([next_reward, 0.0]))\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test epsilon greedy
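The expected value asserted in test_egreedy_update_params is the standard incremental-mean update: with the action count already incremented to n, the new mean is old_mean * (n - 1) / n + reward / n. A quick arithmetic check with the numbers used in the test:

old_mean, n, reward = 2.0, 5, 1.0            # values taken from the test above
incremental = old_mean * (n - 1) / n + reward / n
from_raw_sum = (old_mean * (n - 1) + reward) / n
print(incremental, from_raw_sum)             # both 1.8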
641,003
30.12.2020 04:14:06
-32,400
510dbfd932935448c036b4da8b8742575efa7560
replace assert with Exception
[ { "change_type": "MODIFY", "old_path": "obp/policy/contextfree.py", "new_path": "obp/policy/contextfree.py", "diff": "@@ -198,9 +198,10 @@ class BernoulliTS(BaseContextFreePolicy):\n\"\"\"Initialize class.\"\"\"\nsuper().__post_init__()\nif self.is_zozotown_prior:\n- assert (\n- self.campaign is not None\n- ), \"`campaign` must be specified when `is_zozotown_prior` is True.\"\n+ if self.campaign is None:\n+ raise Exception(\n+ \"`campaign` must be specified when `is_zozotown_prior` is True.\"\n+ )\nself.alpha = production_prior_for_bts[self.campaign][\"alpha\"]\nself.beta = production_prior_for_bts[self.campaign][\"beta\"]\nelse:\n" }, { "change_type": "MODIFY", "old_path": "test/policy/test_contextfree.py", "new_path": "test/policy/test_contextfree.py", "diff": "@@ -3,6 +3,7 @@ import numpy as np\nfrom obp.policy.contextfree import EpsilonGreedy\nfrom obp.policy.contextfree import Random\n+from obp.policy.contextfree import BernoulliTS\ndef test_contextfree_base_exception():\n@@ -87,3 +88,8 @@ def test_random_compute_batch_action_dist():\nassert action_dist.shape[2] == len_list\nassert len(np.unique(action_dist)) == 1\nassert np.unique(action_dist)[0] == 1 / n_actions\n+\n+\n+def test_bernoulli_ts_zozotown_prior():\n+ with pytest.raises(Exception):\n+ BernoulliTS(n_actions=2, is_zozotown_prior=True)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
replace assert with Exception
641,003
30.12.2020 05:21:58
-32,400
ed065a17df3ac84cfc578a49546bc13e8cf0fae8
add test bernoulli ts
[ { "change_type": "MODIFY", "old_path": "test/policy/test_contextfree.py", "new_path": "test/policy/test_contextfree.py", "diff": "@@ -73,6 +73,7 @@ def test_egreedy_update_params():\nreward = 1.0\npolicy.update_params(action, reward)\nassert np.array_equal(policy.action_counts, np.array([5, 3]))\n+ # in epsilon greedy, reward is defined as mean reward\nnext_reward = (2.0 * (5 - 1) / 5) + (reward / 5)\nassert np.allclose(policy.reward_counts, np.array([next_reward, 0.0]))\n@@ -91,5 +92,55 @@ def test_random_compute_batch_action_dist():\ndef test_bernoulli_ts_zozotown_prior():\n+\nwith pytest.raises(Exception):\nBernoulliTS(n_actions=2, is_zozotown_prior=True)\n+\n+ policy_all = BernoulliTS(n_actions=2, is_zozotown_prior=True, campaign=\"all\")\n+ # check whether it is not an non-informative prior parameter (i.e., default parameter)\n+ assert len(np.unique(policy_all.alpha)) != 1\n+ assert len(np.unique(policy_all.beta)) != 1\n+\n+ policy_men = BernoulliTS(n_actions=2, is_zozotown_prior=True, campaign=\"men\")\n+ assert len(np.unique(policy_men.alpha)) != 1\n+ assert len(np.unique(policy_men.beta)) != 1\n+\n+ policy_women = BernoulliTS(n_actions=2, is_zozotown_prior=True, campaign=\"women\")\n+ assert len(np.unique(policy_women.alpha)) != 1\n+ assert len(np.unique(policy_women.beta)) != 1\n+\n+\n+def test_bernoulli_ts_select_action():\n+ # TODO: case where n_actions < len_list\n+\n+ policy1 = BernoulliTS(n_actions=3, len_list=3)\n+ assert np.allclose(np.sort(policy1.select_action()), np.array([0, 1, 2]))\n+\n+ policy = BernoulliTS(n_actions=5, len_list=3)\n+ assert len(policy.select_action()) == 3\n+\n+\n+def test_bernoulli_ts_update_params():\n+ policy = BernoulliTS(n_actions=2)\n+ policy.action_counts_temp = np.array([4, 3])\n+ policy.action_counts = np.copy(policy.action_counts_temp)\n+ policy.reward_counts_temp = np.array([2.0, 0.0])\n+ policy.reward_counts = np.copy(policy.reward_counts_temp)\n+ action = 0\n+ reward = 1.0\n+ policy.update_params(action, reward)\n+ assert np.array_equal(policy.action_counts, np.array([5, 3]))\n+ # in bernoulli ts, reward is defined as sum reward\n+ next_reward = 2.0 + reward\n+ assert np.allclose(policy.reward_counts, np.array([next_reward, 0.0]))\n+\n+\n+def test_bernoulli_ts_compute_batch_action_dist():\n+ n_rounds = 10\n+ n_actions = 5\n+ len_list = 2\n+ policy = BernoulliTS(n_actions=n_actions, len_list=len_list)\n+ action_dist = policy.compute_batch_action_dist(n_rounds=n_rounds, n_sim=30)\n+ assert action_dist.shape[0] == n_rounds\n+ assert action_dist.shape[1] == n_actions\n+ assert action_dist.shape[2] == len_list\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test bernoulli ts
641,003
30.12.2020 15:26:43
-32,400
be7f93694641189cd4f6d866c2471e3c8c8079aa
add test policy lin epsilon
[ { "change_type": "MODIFY", "old_path": "obp/policy/linear.py", "new_path": "obp/policy/linear.py", "diff": "@@ -49,9 +49,10 @@ class LinEpsilonGreedy(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- assert (\n- 0 <= self.epsilon <= 1\n- ), f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ if not 0 <= self.epsilon <= 1:\n+ raise ValueError(\n+ f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ )\nself.policy_name = f\"linear_epsilon_greedy_{self.epsilon}\"\nsuper().__post_init__()\n@@ -80,6 +81,11 @@ class LinEpsilonGreedy(BaseContextualPolicy):\nList of selected actions.\n\"\"\"\n+ if context.ndim != 2 or context.shape[0] != 1:\n+ raise ValueError(\n+ f\"context shape must be (1, dim_context),but {context.shape} is given\"\n+ )\n+\nif self.random_.rand() > self.epsilon:\nself.theta_hat = np.concatenate(\n[\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test policy lin epsilon
641,003
30.12.2020 17:16:44
-32,400
cee30c17bdba4707af7ead12d0786e97740f4ada
add test policy lin ucb
[ { "change_type": "MODIFY", "old_path": "obp/policy/linear.py", "new_path": "obp/policy/linear.py", "diff": "@@ -171,9 +171,10 @@ class LinUCB(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- assert (\n- 0 <= self.epsilon <= 1\n- ), f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ if self.epsilon < 0:\n+ raise ValueError(\n+ f\"epsilon must be positive scalar, but {self.epsilon} is given\"\n+ )\nself.policy_name = f\"linear_ucb_{self.epsilon}\"\nsuper().__post_init__()\n@@ -202,6 +203,10 @@ class LinUCB(BaseContextualPolicy):\nList of selected actions.\n\"\"\"\n+ if context.ndim != 2 or context.shape[0] != 1:\n+ raise ValueError(\n+ f\"context shape must be (1, dim_context),but {context.shape} is given\"\n+ )\nself.theta_hat = np.concatenate(\n[\nself.A_inv[i] @ np.expand_dims(self.b[:, i], axis=1)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test policy lin ucb
641,003
30.12.2020 17:24:02
-32,400
57ec520955f640c0f64bdbf61e79ffcf1fcac714
remove inappropriate docs argument
[ { "change_type": "MODIFY", "old_path": "obp/policy/linear.py", "new_path": "obp/policy/linear.py", "diff": "@@ -275,9 +275,6 @@ class LinTS(BaseContextualPolicy):\nbatch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default=1.\n- Prior parameter for the online logistic regression.\n-\nrandom_state: int, default=None\nControls the random seed in sampling actions.\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
remove inappropriate docs argument
641,006
09.01.2021 20:29:01
-32,400
f81bbef20424c2d634204e70e2557a3e0038d0da
add test of regression models
[ { "change_type": "MODIFY", "old_path": "tests/ope/conftest.py", "new_path": "tests/ope/conftest.py", "diff": "from typing import Set, Tuple, List\nfrom dataclasses import dataclass\n-\n+import copy\nimport numpy as np\nimport pytest\n@@ -18,7 +18,7 @@ from obp.dataset import (\n@dataclass\nclass LogisticEpsilonGreedyBatch(LogisticEpsilonGreedy):\n\"\"\"\n- Add random action flag and compute_batch_action_dist method to LogisticEpsilonGreedy\n+ WIP: Add random action flag and compute_batch_action_dist method to LogisticEpsilonGreedy\n\"\"\"\n@@ -86,6 +86,23 @@ def synthetic_bandit_feedback() -> BanditFeedback:\nreturn bandit_feedback\n+# adjust expected reward of synthetic bandit feedback\[email protected](scope=\"session\")\n+def fixed_synthetic_bandit_feedback(synthetic_bandit_feedback) -> BanditFeedback:\n+ # set random\n+ random_state = 12345\n+ random_ = check_random_state(random_state)\n+ # copy synthetic bandit feedback\n+ bandit_feedback = copy.deepcopy(synthetic_bandit_feedback)\n+ # expected reward would be about 0.6%, which is close to that of ZOZO dataset\n+ bandit_feedback[\"expected_reward\"] = bandit_feedback[\"expected_reward\"] * 0.01\n+ expected_reward_factual = bandit_feedback[\"expected_reward\"][\n+ np.arange(bandit_feedback[\"n_rounds\"]), bandit_feedback[\"action\"]\n+ ]\n+ bandit_feedback[\"reward\"] = random_.binomial(n=1, p=expected_reward_factual)\n+ return bandit_feedback\n+\n+\n# key set of bandit feedback data\[email protected](scope=\"session\")\ndef feedback_key_set() -> Set[str]:\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tests/ope/hyperparams.yaml", "diff": "+lightgbm:\n+ max_iter: 100\n+ learning_rate: 0.01\n+ max_depth: 5\n+ min_samples_leaf: 10\n+ random_state: 12345\n+logistic_regression:\n+ max_iter: 10000\n+ C: 1000\n+ random_state: 12345\n+random_forest:\n+ n_estimators: 100\n+ max_depth: 5\n+ min_samples_leaf: 10\n+ random_state: 12345\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tests/ope/test_regression_models.py", "diff": "+from typing import Dict\n+from pathlib import Path\n+import yaml\n+\n+import numpy as np\n+from sklearn.experimental import enable_hist_gradient_boosting\n+from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\n+from sklearn.linear_model import LogisticRegression\n+from sklearn.metrics import roc_auc_score\n+\n+from obp.ope import RegressionModel\n+from obp.types import BanditFeedback\n+\n+\n+binary_model_dict = dict(\n+ logistic_regression=LogisticRegression,\n+ lightgbm=HistGradientBoostingClassifier,\n+ random_forest=RandomForestClassifier,\n+)\n+\n+# hyperparameter settings for the base ML model in regression model\n+cd_path = Path(__file__).parent.resolve()\n+with open(cd_path / \"hyperparams.yaml\", \"rb\") as f:\n+ hyperparams = yaml.safe_load(f)\n+\n+\n+def test_performance_of_binary_outcome_models(\n+ fixed_synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n+) -> None:\n+ \"\"\"\n+ Test the performance of ope estimators using synthetic bandit data and random evaluation policy\n+ when the regression model is estimated by a logistic regression\n+ \"\"\"\n+ bandit_feedback = fixed_synthetic_bandit_feedback.copy()\n+ expected_reward = np.expand_dims(bandit_feedback[\"expected_reward\"], axis=-1)\n+ action_dist = random_action_dist\n+ # compute ground truth policy value using expected reward\n+ ground_truth_policy_value = np.average(\n+ expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1\n+ )\n+ # compute statistics of 
ground truth policy value\n+ gt_mean = ground_truth_policy_value.mean()\n+ gt_std = ground_truth_policy_value.std(ddof=1)\n+ random_state = 12345\n+ auc_scores: Dict[str, float] = {}\n+ # check ground truth\n+ ci_times = 5\n+ ci_bound = gt_std * ci_times / np.sqrt(ground_truth_policy_value.shape[0])\n+ print(\n+ f\"gt_mean: {gt_mean}, {ci_times} * gt_std / sqrt({ground_truth_policy_value.shape[0]}): {ci_bound}\"\n+ )\n+ # check the performance of regression models using doubly robust criteria (|\\hat{q} - q| <= |q| is satisfied with high probability)\n+ dr_criteria_pass_rate = 0.9\n+ fit_methods = [\"normal\", \"iw\", \"mrdr\"]\n+ for fit_method in fit_methods:\n+ for model_name, model in binary_model_dict.items():\n+ regression_model = RegressionModel(\n+ n_actions=bandit_feedback[\"n_actions\"],\n+ len_list=bandit_feedback[\"position\"].ndim,\n+ action_context=bandit_feedback[\"action_context\"],\n+ base_model=model(**hyperparams[model_name]),\n+ fitting_method=fit_method,\n+ )\n+ if fit_method == \"normal\":\n+ # train regression model on logged bandit feedback data\n+ estimated_rewards_by_reg_model = regression_model.fit_predict(\n+ context=bandit_feedback[\"context\"],\n+ action=bandit_feedback[\"action\"],\n+ reward=bandit_feedback[\"reward\"],\n+ n_folds=3, # 3-fold cross-fitting\n+ random_state=random_state,\n+ )\n+ else:\n+ # train regression model on logged bandit feedback data\n+ estimated_rewards_by_reg_model = regression_model.fit_predict(\n+ context=bandit_feedback[\"context\"],\n+ action=bandit_feedback[\"action\"],\n+ reward=bandit_feedback[\"reward\"],\n+ pscore=bandit_feedback[\"pscore\"],\n+ position=bandit_feedback[\"position\"],\n+ action_dist=action_dist,\n+ n_folds=3, # 3-fold cross-fitting\n+ random_state=random_state,\n+ )\n+ auc_scores[model_name + \"_\" + fit_method] = roc_auc_score(\n+ y_true=bandit_feedback[\"reward\"],\n+ y_score=estimated_rewards_by_reg_model[\n+ np.arange(bandit_feedback[\"reward\"].shape[0]),\n+ bandit_feedback[\"action\"],\n+ bandit_feedback[\"position\"],\n+ ],\n+ )\n+ # compare dr criteria\n+ dr_criteria = np.abs((gt_mean - estimated_rewards_by_reg_model)) - np.abs(\n+ gt_mean\n+ )\n+ print(\n+ f\"Dr criteria is satisfied with probability {np.mean(dr_criteria <= 0)} ------ model: {model_name} ({fit_method}),\"\n+ )\n+ assert (\n+ np.mean(dr_criteria <= 0) >= dr_criteria_pass_rate\n+ ), f\"Dr criteria should not be larger then 0 with probability {dr_criteria_pass_rate}\"\n+\n+ for model_name in auc_scores:\n+ print(f\"AUC of {model_name} is {auc_scores[model_name]}\")\n+ assert (\n+ auc_scores[model_name] > 0.5\n+ ), f\"AUC of {model_name} should be greator than 0.5\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test of regression models
641,003
10.01.2021 20:31:40
-32,400
ebbdce43c9fe5b9c63cba55ad5023a1a8acce110
add test policy logistic epsilon
[ { "change_type": "MODIFY", "old_path": "obp/policy/logistic.py", "new_path": "obp/policy/logistic.py", "diff": "@@ -50,9 +50,10 @@ class LogisticEpsilonGreedy(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- assert (\n- 0 <= self.epsilon <= 1\n- ), f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ if not 0 <= self.epsilon <= 1:\n+ raise ValueError(\n+ f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ )\nself.policy_name = f\"logistic_egreedy_{self.epsilon}\"\nsuper().__post_init__()\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/policy/test_logistic.py", "diff": "+import pytest\n+import numpy as np\n+\n+from obp.policy.logistic import LogisticEpsilonGreedy\n+from obp.policy.logistic import LogisticUCB\n+from obp.policy.logistic import LogisticTS\n+from obp.policy.logistic import MiniBatchLogisticRegression\n+\n+\n+def test_logistic_epsilon_normal_epsilon():\n+\n+ policy1 = LogisticEpsilonGreedy(n_actions=2, dim=2)\n+ assert 0 <= policy1.epsilon <= 1\n+\n+ policy2 = LogisticEpsilonGreedy(n_actions=2, dim=2, epsilon=0.5)\n+ assert policy2.epsilon == 0.5\n+\n+\n+def test_logistic_epsilon_abnormal_epsilon():\n+\n+ with pytest.raises(ValueError):\n+ LogisticEpsilonGreedy(n_actions=2, dim=2, epsilon=1.3)\n+\n+ with pytest.raises(ValueError):\n+ LogisticEpsilonGreedy(n_actions=2, dim=2, epsilon=-0.3)\n+\n+\n+def test_logistic_epsilon_each_action_model():\n+ n_actions = 3\n+ policy = LogisticEpsilonGreedy(n_actions=n_actions, dim=2, epsilon=0.5)\n+ for i in range(n_actions):\n+ assert isinstance(policy.model_list[i], MiniBatchLogisticRegression)\n+\n+\n+def test_logistic_epsilon_select_action_exploitation():\n+ trial_num = 50\n+ policy = LogisticEpsilonGreedy(n_actions=2, dim=2, epsilon=0.0)\n+ context = np.array([1.0, 1.0]).reshape(1, -1)\n+ policy.update_params(action=0, reward=1.0, context=context)\n+ policy.update_params(action=0, reward=1.0, context=context)\n+ policy.update_params(action=1, reward=1.0, context=context)\n+ policy.update_params(action=1, reward=0.0, context=context)\n+ for _ in range(trial_num):\n+ assert policy.select_action(context=context)[0] == 0\n+\n+\n+def test_logistic_epsilon_select_action_exploration():\n+ trial_num = 50\n+ policy = LogisticEpsilonGreedy(n_actions=2, dim=2, epsilon=1.0)\n+ context = np.array([1.0, 1.0]).reshape(1, -1)\n+ policy.update_params(action=0, reward=1.0, context=context)\n+ policy.update_params(action=0, reward=1.0, context=context)\n+ policy.update_params(action=1, reward=1.0, context=context)\n+ policy.update_params(action=1, reward=0.0, context=context)\n+ selected_action = [policy.select_action(context=context) for _ in range(trial_num)]\n+ assert 0 < sum(selected_action)[0] < trial_num\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test policy logistic epsilon
641,003
10.01.2021 20:37:21
-32,400
d14cd642a9e10b2c4b5daec6cb5bf939e21811e8
add test policy logistic ucb and ts
[ { "change_type": "MODIFY", "old_path": "obp/policy/logistic.py", "new_path": "obp/policy/logistic.py", "diff": "@@ -162,9 +162,10 @@ class LogisticUCB(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- assert (\n- 0 <= self.epsilon <= 1\n- ), f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ if self.epsilon < 0:\n+ raise ValueError(\n+ f\"epsilon must be positive scalar, but {self.epsilon} is given\"\n+ )\nself.policy_name = f\"logistic_ucb_{self.epsilon}\"\nsuper().__post_init__()\n" }, { "change_type": "MODIFY", "old_path": "test/policy/test_logistic.py", "new_path": "test/policy/test_logistic.py", "diff": "@@ -54,3 +54,39 @@ def test_logistic_epsilon_select_action_exploration():\npolicy.update_params(action=1, reward=0.0, context=context)\nselected_action = [policy.select_action(context=context) for _ in range(trial_num)]\nassert 0 < sum(selected_action)[0] < trial_num\n+\n+\n+def test_lin_ucb_initialize():\n+ # note that the meaning of epsilon is different from that of LogisticEpsilonGreedy\n+ with pytest.raises(ValueError):\n+ LogisticUCB(n_actions=2, dim=2, epsilon=-0.2)\n+\n+ n_actions = 3\n+ policy = LogisticUCB(n_actions=n_actions, dim=2, epsilon=0.5)\n+ for i in range(n_actions):\n+ assert isinstance(policy.model_list[i], MiniBatchLogisticRegression)\n+\n+\n+def test_logistic_ucb_select_action():\n+ dim = 3\n+ len_list = 2\n+ policy = LogisticUCB(n_actions=4, dim=dim, len_list=2, epsilon=0.0)\n+ context = np.ones(dim).reshape(1, -1)\n+ action = policy.select_action(context=context)\n+ assert len(action) == len_list\n+\n+\n+def test_logistic_ts_initialize():\n+ n_actions = 3\n+ policy = LogisticUCB(n_actions=n_actions, dim=2, epsilon=0.5)\n+ for i in range(n_actions):\n+ assert isinstance(policy.model_list[i], MiniBatchLogisticRegression)\n+\n+\n+def test_logistic_ts_select_action():\n+ dim = 3\n+ len_list = 2\n+ policy = LogisticTS(n_actions=4, dim=dim, len_list=2)\n+ context = np.ones(dim).reshape(1, -1)\n+ action = policy.select_action(context=context)\n+ assert len(action) == len_list\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test policy logistic ucb and ts
641,003
11.01.2021 00:40:39
-32,400
1ed9907e5bece8e67a62e9e6ab940d33f5f7a8b3
add test policy offline unittest
[ { "change_type": "MODIFY", "old_path": "obp/policy/base.py", "new_path": "obp/policy/base.py", "diff": "@@ -185,12 +185,20 @@ class BaseOfflinePolicyLearner(metaclass=ABCMeta):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- assert self.n_actions > 1 and isinstance(\n- self.n_actions, int\n- ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- assert self.len_list > 0 and isinstance(\n- self.len_list, int\n- ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n+ if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n+ raise ValueError(\n+ f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ )\n+\n+ if not isinstance(self.len_list, int) or self.len_list <= 0:\n+ raise ValueError(\n+ f\"len_list must be a positive integer, but {self.len_list} is given\"\n+ )\n+\n+ if self.n_actions < self.len_list:\n+ raise ValueError(\n+ f\"n_actions >= len_list should hold, but n_actions is {self.n_actions} and len_list is {self.len_list}\"\n+ )\n@property\ndef policy_type(self) -> str:\n" }, { "change_type": "MODIFY", "old_path": "obp/policy/offline.py", "new_path": "obp/policy/offline.py", "diff": "@@ -50,9 +50,8 @@ class IPWLearner(BaseOfflinePolicyLearner):\nif self.base_classifier is None:\nself.base_classifier = LogisticRegression(random_state=12345)\nelse:\n- assert is_classifier(\n- self.base_classifier\n- ), \"base_classifier must be a classifier\"\n+ if not is_classifier(self.base_classifier):\n+ raise ValueError(\"base_classifier must be a classifier\")\nself.base_classifier_list = [\nclone(self.base_classifier) for _ in np.arange(self.len_list)\n]\n@@ -149,17 +148,22 @@ class IPWLearner(BaseOfflinePolicyLearner):\nif self.len_list == 1:\nposition = np.zeros_like(action, dtype=int)\nelse:\n- assert (\n- isinstance(position, np.ndarray) and position.ndim == 1\n- ), f\"when len_list > 1, position must be a 1-dimensional ndarray\"\n+ if not isinstance(position, np.ndarray) or position.ndim != 1:\n+ raise ValueError(\n+ f\"when len_list > 1, position must be a 1-dimensional ndarray\"\n+ )\nfor position_ in np.arange(self.len_list):\n+ print(f\"position_:{position_}\")\n+ print(f\"position == position_:{position == position_}\")\n+ print(f\"context[position == position_]:{context[position == position_]}\")\nX, sample_weight, y = self._create_train_data_for_opl(\ncontext=context[position == position_],\naction=action[position == position_],\nreward=reward[position == position_],\npscore=pscore[position == position_],\n)\n+ print(f\"X={X}, y={y}, sample_weight={sample_weight}\")\nself.base_classifier_list[position_].fit(\nX=X, y=y, sample_weight=sample_weight\n)\n@@ -184,9 +188,8 @@ class IPWLearner(BaseOfflinePolicyLearner):\nIf you want a non-repetitive action set, please use the `sample_action` method.\n\"\"\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2-dimensional ndarray\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\nn_rounds = context.shape[0]\naction_dist = np.zeros((n_rounds, self.n_actions, self.len_list))\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/policy/test_offline.py", "diff": "+import pytest\n+import numpy as np\n+from sklearn.linear_model import LogisticRegression\n+\n+\n+from obp.policy.offline import IPWLearner\n+\n+\n+def test_base_opl_init():\n+ # n_actions\n+ with pytest.raises(ValueError):\n+ 
IPWLearner(n_actions=1)\n+\n+ with pytest.raises(ValueError):\n+ IPWLearner(n_actions=\"3\")\n+\n+ # len_list\n+ with pytest.raises(ValueError):\n+ IPWLearner(n_actions=2, len_list=0)\n+\n+ with pytest.raises(ValueError):\n+ IPWLearner(n_actions=2, len_list=\"3\")\n+\n+ # policy_type\n+ assert IPWLearner(n_actions=2).policy_type == \"offline\"\n+\n+ # invalid relationship between n_actions and len_list\n+ with pytest.raises(ValueError):\n+ IPWLearner(n_actions=5, len_list=10)\n+\n+ with pytest.raises(ValueError):\n+ IPWLearner(n_actions=2, len_list=3)\n+\n+\n+def test_ipw_learner_init():\n+ # base classifier\n+ len_list = 2\n+ learner1 = IPWLearner(n_actions=2, len_list=len_list)\n+ assert isinstance(learner1.base_classifier, LogisticRegression)\n+ for i in range(len_list):\n+ assert isinstance(learner1.base_classifier_list[i], LogisticRegression)\n+\n+ with pytest.raises(ValueError):\n+ from sklearn.linear_model import LinearRegression\n+\n+ IPWLearner(n_actions=2, base_classifier=LinearRegression())\n+\n+ from sklearn.naive_bayes import GaussianNB\n+\n+ learner2 = IPWLearner(n_actions=2, len_list=len_list, base_classifier=GaussianNB())\n+ assert isinstance(learner2.base_classifier, GaussianNB)\n+ for i in range(len_list):\n+ assert isinstance(learner2.base_classifier_list[i], GaussianNB)\n+\n+\n+def test_create_train_data_for_opl():\n+ context = np.array([1.0, 1.0]).reshape(1, -1)\n+ learner = IPWLearner(n_actions=2)\n+ action = np.array([0])\n+ reward = np.array([1.0])\n+ pscore = np.array([0.5])\n+\n+ X, sample_weight, y = learner._create_train_data_for_opl(\n+ context=context, action=action, reward=reward, pscore=pscore\n+ )\n+\n+ assert np.allclose(X, np.array([1.0, 1.0]).reshape(1, -1))\n+ assert np.allclose(sample_weight, np.array([2.0]))\n+ assert np.allclose(y, np.array([0]))\n+\n+\n+def test_opl_fit():\n+ context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)\n+ action = np.array([0, 1])\n+ reward = np.array([1.0, 0.0])\n+ position = np.array([0, 0])\n+ learner = IPWLearner(n_actions=2, len_list=1)\n+ learner.fit(context=context, action=action, reward=reward, position=position)\n+\n+ # inconsistency with the shape\n+ with pytest.raises(AssertionError):\n+ learner = IPWLearner(n_actions=2, len_list=2)\n+ variant_context = np.array([1.0, 1.0, 1.0, 1.0])\n+ learner.fit(\n+ context=variant_context, action=action, reward=reward, position=position\n+ )\n+\n+ # len_list > 2, but position is not set\n+ with pytest.raises(ValueError):\n+ learner = IPWLearner(n_actions=2, len_list=2)\n+ learner.fit(context=context, action=action, reward=reward)\n+\n+\n+def test_opl_predict():\n+ n_actions = 2\n+ len_list = 1\n+\n+ # shape error\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ learner = IPWLearner(n_actions=n_actions, len_list=len_list)\n+ learner.predict(context=context)\n+\n+ # shape consistency of action_dist\n+ # n_rounds is 5, dim_context is 2\n+ context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)\n+ action = np.array([0, 1])\n+ reward = np.array([1.0, 0.0])\n+ position = np.array([0, 0])\n+ learner = IPWLearner(n_actions=2, len_list=1)\n+ learner.fit(context=context, action=action, reward=reward, position=position)\n+\n+ context_predict = np.array([i for i in range(10)]).reshape(5, 2)\n+ action_dist = learner.predict(context=context_predict)\n+ assert action_dist.shape[0] == 5\n+ assert action_dist.shape[1] == n_actions\n+ assert action_dist.shape[2] == len_list\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test policy offline unittest
641,003
11.01.2021 14:43:41
-32,400
b4dd1a4873ccf8ebe291105b3b036e85dccd343e
add test dataset synthetic
[ { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -132,16 +132,21 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert self.n_actions > 1 and isinstance(\n- self.n_actions, int\n- ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- assert self.dim_context > 0 and isinstance(\n- self.dim_context, int\n- ), f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- assert self.reward_type in [\n+ if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n+ raise ValueError(\n+ f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ )\n+ if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n+ raise ValueError(\n+ f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n+ )\n+ if self.reward_type not in [\n\"binary\",\n\"continuous\",\n- ], f\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\n+ ]:\n+ raise ValueError(\n+ f\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\n+ )\nself.random_ = check_random_state(self.random_state)\nif self.reward_function is None:\n@@ -174,9 +179,10 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nGenerated synthetic bandit feedback dataset.\n\"\"\"\n- assert n_rounds > 0 and isinstance(\n- n_rounds, int\n- ), f\"n_rounds must be a positive integer, but {n_rounds} is given\"\n+ if not isinstance(n_rounds, int) or n_rounds <= 0:\n+ raise ValueError(\n+ f\"n_rounds must be a positive integer, but {n_rounds} is given\"\n+ )\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\n@@ -227,6 +233,9 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\nexpected_reward_ = truncnorm.stats(\na=a, b=b, loc=mean, scale=std, moments=\"m\"\n)\n+ else:\n+ raise NotImplementedError\n+\nreturn dict(\nn_rounds=n_rounds,\nn_actions=self.n_actions,\n@@ -264,12 +273,11 @@ def logistic_reward_function(\nExpected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2-dimensional ndarray\"\n- assert (\n- isinstance(action_context, np.ndarray) and action_context.ndim == 2\n- ), \"action_context must be 2-dimensional ndarray\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\nrandom_ = check_random_state(random_state)\nlogits = np.zeros((context.shape[0], action_context.shape[0]))\n@@ -306,12 +314,11 @@ def linear_reward_function(\nExpected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2-dimensional ndarray\"\n- assert (\n- isinstance(action_context, np.ndarray) and action_context.ndim == 2\n- ), \"action_context must be 2-dimensional ndarray\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or 
action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\nrandom_ = check_random_state(random_state)\nexpected_reward = np.zeros((context.shape[0], action_context.shape[0]))\n@@ -348,12 +355,11 @@ def linear_behavior_policy(\nAction choice probabilities given context (:math:`x`), i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n\"\"\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2-dimensional ndarray\"\n- assert (\n- isinstance(action_context, np.ndarray) and action_context.ndim == 2\n- ), \"action_context must be 2-dimensional ndarray\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\nrandom_ = check_random_state(random_state)\nlogits = np.zeros((context.shape[0], action_context.shape[0]))\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/dataset/test_synthetic.py", "diff": "+import pytest\n+import numpy as np\n+\n+from obp.dataset import SyntheticBanditDataset\n+from obp.dataset.synthetic import (\n+ logistic_reward_function,\n+ linear_reward_function,\n+ linear_behavior_policy,\n+)\n+\n+\n+def test_synthetic_init():\n+ # n_actions\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=1)\n+\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=\"3\")\n+\n+ # dim_context\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=2, dim_context=0)\n+\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=2, dim_context=\"2\")\n+\n+ # reward_type\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=2, reward_type=\"aaa\")\n+\n+ # when reward_function is None, expected_reward is randomly sampled in [0, 1]\n+ # this check includes the test of `sample_contextfree_expected_reward` function\n+ dataset = SyntheticBanditDataset(n_actions=2)\n+ assert len(dataset.expected_reward) == 2\n+ assert np.all(0 <= dataset.expected_reward) and np.all(dataset.expected_reward <= 1)\n+\n+ # when behavior_policy_function is None, behavior_policy is set to uniform one\n+ uniform_policy = np.array([0.5, 0.5])\n+ assert np.allclose(dataset.behavior_policy, uniform_policy)\n+\n+ # action_context\n+ ohe = np.eye(2, dtype=int)\n+ assert np.allclose(dataset.action_context, ohe)\n+\n+\n+def test_synthetic_obtain_batch_bandit_feedback():\n+ # n_rounds\n+ with pytest.raises(ValueError):\n+ dataset = SyntheticBanditDataset(n_actions=2)\n+ dataset.obtain_batch_bandit_feedback(n_rounds=0)\n+\n+ with pytest.raises(ValueError):\n+ dataset = SyntheticBanditDataset(n_actions=2)\n+ dataset.obtain_batch_bandit_feedback(n_rounds=\"3\")\n+\n+ # bandit feedback\n+ n_rounds = 10\n+ n_actions = 5\n+ dataset = SyntheticBanditDataset(n_actions=n_actions)\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ assert bandit_feedback[\"n_rounds\"] == n_rounds\n+ assert bandit_feedback[\"n_actions\"] == n_actions\n+ assert (\n+ bandit_feedback[\"context\"].shape[0] == n_rounds # n_rounds\n+ and bandit_feedback[\"context\"].shape[1] == 1 # default dim_context\n+ )\n+ assert (\n+ bandit_feedback[\"action_context\"].shape[0] == n_actions\n+ and bandit_feedback[\"action_context\"].shape[1] == n_actions\n+ )\n+ assert (\n+ bandit_feedback[\"action\"].ndim == 1\n+ and 
len(bandit_feedback[\"action\"]) == n_rounds\n+ )\n+ assert (\n+ bandit_feedback[\"position\"].ndim == 1\n+ and len(bandit_feedback[\"position\"]) == n_rounds\n+ )\n+ assert (\n+ bandit_feedback[\"reward\"].ndim == 1\n+ and len(bandit_feedback[\"reward\"]) == n_rounds\n+ )\n+ assert (\n+ bandit_feedback[\"expected_reward\"].shape[0] == n_rounds\n+ and bandit_feedback[\"expected_reward\"].shape[1] == n_actions\n+ )\n+ assert (\n+ bandit_feedback[\"pscore\"].ndim == 1\n+ and len(bandit_feedback[\"pscore\"]) == n_rounds\n+ )\n+\n+\n+def test_synthetic_logistic_reward_function():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ logistic_reward_function(context=context, action_context=np.ones([2, 2]))\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ logistic_reward_function(context=context, action_context=np.ones([2, 2]))\n+\n+ # action_context\n+ with pytest.raises(ValueError):\n+ action_context = np.array([1.0, 1.0])\n+ logistic_reward_function(context=np.ones([2, 2]), action_context=action_context)\n+\n+ with pytest.raises(ValueError):\n+ action_context = [1.0, 1.0]\n+ logistic_reward_function(context=np.ones([2, 2]), action_context=action_context)\n+\n+ # expected_reward\n+ n_rounds = 10\n+ dim_context = dim_action_context = 3\n+ n_actions = 5\n+ context = np.ones([n_rounds, dim_context])\n+ action_context = np.ones([n_actions, dim_action_context])\n+ expected_reward = logistic_reward_function(\n+ context=context, action_context=action_context\n+ )\n+ assert (\n+ expected_reward.shape[0] == n_rounds and expected_reward.shape[1] == n_actions\n+ )\n+ assert np.all(0 <= expected_reward) and np.all(expected_reward <= 1)\n+\n+\n+def test_synthetic_linear_reward_function():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ linear_reward_function(context=context, action_context=np.ones([2, 2]))\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ linear_reward_function(context=context, action_context=np.ones([2, 2]))\n+\n+ # action_context\n+ with pytest.raises(ValueError):\n+ action_context = np.array([1.0, 1.0])\n+ linear_reward_function(context=np.ones([2, 2]), action_context=action_context)\n+\n+ with pytest.raises(ValueError):\n+ action_context = [1.0, 1.0]\n+ linear_reward_function(context=np.ones([2, 2]), action_context=action_context)\n+\n+ # expected_reward\n+ n_rounds = 10\n+ dim_context = dim_action_context = 3\n+ n_actions = 5\n+ context = np.ones([n_rounds, dim_context])\n+ action_context = np.ones([n_actions, dim_action_context])\n+ expected_reward = linear_reward_function(\n+ context=context, action_context=action_context\n+ )\n+ assert (\n+ expected_reward.shape[0] == n_rounds and expected_reward.shape[1] == n_actions\n+ )\n+\n+\n+def test_synthetic_linear_behavior_policy():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ linear_behavior_policy(context=context, action_context=np.ones([2, 2]))\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ linear_behavior_policy(context=context, action_context=np.ones([2, 2]))\n+\n+ # action_context\n+ with pytest.raises(ValueError):\n+ action_context = np.array([1.0, 1.0])\n+ linear_behavior_policy(context=np.ones([2, 2]), action_context=action_context)\n+\n+ with pytest.raises(ValueError):\n+ action_context = [1.0, 1.0]\n+ linear_behavior_policy(context=np.ones([2, 2]), action_context=action_context)\n+\n+ # expected_reward\n+ n_rounds = 10\n+ dim_context = dim_action_context = 3\n+ n_actions = 
5\n+ context = np.ones([n_rounds, dim_context])\n+ action_context = np.ones([n_actions, dim_action_context])\n+ action_prob = linear_behavior_policy(context=context, action_context=action_context)\n+ assert action_prob.shape[0] == n_rounds and action_prob.shape[1] == n_actions\n+ assert np.all(0 <= action_prob) and np.all(action_prob <= 1)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test dataset synthetic
641,003
24.01.2021 17:14:46
-32,400
3aaeffaa0ba02a2dd35c09e3b5c184da9e1260db
add test_real_init
[ { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "@@ -53,16 +53,34 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Open Bandit Dataset Class.\"\"\"\n- assert self.behavior_policy in [\n+ # assert self.behavior_policy in [\n+ # \"bts\",\n+ # \"random\",\n+ # ], f\"behavior_policy must be either of 'bts' or 'random', but {self.behavior_policy} is given\"\n+ # assert self.campaign in [\n+ # \"all\",\n+ # \"men\",\n+ # \"women\",\n+ # ], f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\n+ if self.behavior_policy not in [\n\"bts\",\n\"random\",\n- ], f\"behavior_policy must be either of 'bts' or 'random', but {self.behavior_policy} is given\"\n- assert self.campaign in [\n+ ]:\n+ raise ValueError(\n+ f\"behavior_policy must be either of 'bts' or 'random', but {self.behavior_policy} is given\"\n+ )\n+\n+ if self.campaign not in [\n\"all\",\n\"men\",\n\"women\",\n- ], f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\n- assert isinstance(self.data_path, Path), f\"data_path must be a Path type\"\n+ ]:\n+ raise ValueError(\n+ f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\n+ )\n+\n+ if not isinstance(self.data_path, Path):\n+ raise ValueError(\"data_path must be a Path type\")\nself.data_path = self.data_path / self.behavior_policy / self.campaign\nself.raw_data_file = f\"{self.campaign}.csv\"\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/dataset/test_real.py", "diff": "+import pytest\n+import numpy as np\n+import pandas as pd\n+\n+from obp.dataset import OpenBanditDataset\n+\n+\n+def test_real_init():\n+ # behavior_policy\n+ with pytest.raises(ValueError):\n+ OpenBanditDataset(behavior_policy=\"aaa\", campaign=\"all\")\n+\n+ # campaign\n+ with pytest.raises(ValueError):\n+ OpenBanditDataset(behavior_policy=\"random\", campaign=\"aaa\")\n+\n+ # data_path\n+ with pytest.raises(ValueError):\n+ OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\", data_path=\"raw_str_path\")\n+\n+ # load_raw_data\n+ opd = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ # check the value exists and has the right type\n+ assert (\n+ isinstance(opd.data, pd.DataFrame)\n+ and isinstance(opd.item_context, pd.DataFrame)\n+ and isinstance(opd.action, np.ndarray)\n+ and isinstance(opd.position, np.ndarray)\n+ and isinstance(opd.reward, np.ndarray)\n+ and isinstance(opd.pscore, np.ndarray)\n+ )\n+\n+ # pre_process (context and action_context)\n+ assert (\n+ isinstance(opd.context, np.ndarray)\n+ and isinstance(opd.action_context, np.ndarray)\n+ )\n+\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test_real_init
641,003
24.01.2021 17:18:31
-32,400
89b6c67760b76b7e13696741844688e7823962f4
fix description of algorithm
[ { "change_type": "MODIFY", "old_path": "test/policy/test_contextfree.py", "new_path": "test/policy/test_contextfree.py", "diff": "@@ -82,7 +82,7 @@ def test_egreedy_update_params():\nreward = 1.0\npolicy.update_params(action, reward)\nassert np.array_equal(policy.action_counts, np.array([5, 3]))\n- # in epsilon greedy, reward is defined as mean reward\n+ # in epsilon greedy, reward_counts is defined as the mean of observed rewards for each action\nnext_reward = (2.0 * (5 - 1) / 5) + (reward / 5)\nassert np.allclose(policy.reward_counts, np.array([next_reward, 0.0]))\n@@ -139,7 +139,7 @@ def test_bernoulli_ts_update_params():\nreward = 1.0\npolicy.update_params(action, reward)\nassert np.array_equal(policy.action_counts, np.array([5, 3]))\n- # in bernoulli ts, reward is defined as sum reward\n+ # in bernoulli ts, reward_counts is defined as the sum of observed rewards for each action\nnext_reward = 2.0 + reward\nassert np.allclose(policy.reward_counts, np.array([next_reward, 0.0]))\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix description of algorithm
641,003
24.01.2021 17:22:08
-32,400
fedc9fcc78ceeb315d8ebe8917f4d00d2cf41166
add test for invalid relationship between n_actions and len_list
[ { "change_type": "MODIFY", "old_path": "test/policy/test_contextfree.py", "new_path": "test/policy/test_contextfree.py", "diff": "@@ -120,7 +120,12 @@ def test_bernoulli_ts_zozotown_prior():\ndef test_bernoulli_ts_select_action():\n- # TODO: case where n_actions < len_list\n+ # invalid relationship between n_actions and len_list\n+ with pytest.raises(ValueError):\n+ BernoulliTS(n_actions=5, len_list=10)\n+\n+ with pytest.raises(ValueError):\n+ BernoulliTS(n_actions=2, len_list=3)\npolicy1 = BernoulliTS(n_actions=3, len_list=3)\nassert np.allclose(np.sort(policy1.select_action()), np.array([0, 1, 2]))\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test for invalid relationship between n_actions and len_list
641,003
24.01.2021 17:31:15
-32,400
f4e6cda2d1ac635d6df46d0c289a8da9e5773f76
fix wrong specification
[ { "change_type": "MODIFY", "old_path": "test/policy/test_logistic.py", "new_path": "test/policy/test_logistic.py", "diff": "@@ -56,7 +56,7 @@ def test_logistic_epsilon_select_action_exploration():\nassert 0 < sum(selected_action)[0] < trial_num\n-def test_lin_ucb_initialize():\n+def test_logistic_ucb_initialize():\n# note that the meaning of epsilon is different from that of LogisticEpsilonGreedy\nwith pytest.raises(ValueError):\nLogisticUCB(n_actions=2, dim=2, epsilon=-0.2)\n@@ -78,7 +78,7 @@ def test_logistic_ucb_select_action():\ndef test_logistic_ts_initialize():\nn_actions = 3\n- policy = LogisticUCB(n_actions=n_actions, dim=2, epsilon=0.5)\n+ policy = LogisticTS(n_actions=n_actions, dim=2)\nfor i in range(n_actions):\nassert isinstance(policy.model_list[i], MiniBatchLogisticRegression)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix wrong specification
641,003
24.01.2021 17:37:31
-32,400
1991fb162f248284d38a10b96eeb5eac6a0f0051
add validation for alpha and lambda
[ { "change_type": "MODIFY", "old_path": "obp/policy/base.py", "new_path": "obp/policy/base.py", "diff": "@@ -143,6 +143,16 @@ class BaseContextualPolicy(metaclass=ABCMeta):\nf\"n_actions >= len_list should hold, but n_actions is {self.n_actions} and len_list is {self.len_list}\"\n)\n+ if not isinstance(self.alpha_, float) or self.alpha_ <= 0.0:\n+ raise ValueError(\n+ f\"alpha_ should be a positive float, but {self.alpha_} is given\"\n+ )\n+\n+ if not isinstance(self.lambda_, float) or self.lambda_ <= 0.0:\n+ raise ValueError(\n+ f\"lambda_ should be a positive float, but {self.lambda_} is given\"\n+ )\n+\nself.n_trial = 0\nself.random_ = check_random_state(self.random_state)\nself.alpha_list = self.alpha_ * np.ones(self.n_actions)\n" }, { "change_type": "MODIFY", "old_path": "test/policy/test_linear.py", "new_path": "test/policy/test_linear.py", "diff": "@@ -54,6 +54,16 @@ def test_linear_base_exception():\nwith pytest.raises(ValueError):\nLinEpsilonGreedy(n_actions=2, len_list=3, dim=2)\n+ # invalid alpha and lambda\n+ with pytest.raises(ValueError):\n+ LinEpsilonGreedy(n_actions=2, dim=2, alpha_=0.0, lambda_=-3.0)\n+\n+ with pytest.raises(ValueError):\n+ LinEpsilonGreedy(n_actions=2, dim=2, alpha_=-0.0)\n+\n+ with pytest.raises(ValueError):\n+ LinEpsilonGreedy(n_actions=2, dim=2, lambda_=-1.0)\n+\ndef test_lin_epsilon_normal_epsilon():\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add validation for alpha and lambda
641,003
24.01.2021 17:54:20
-32,400
de7a8d50342b37c745933119e0d7560b22dbb726
add validation for sample_action
[ { "change_type": "MODIFY", "old_path": "obp/policy/offline.py", "new_path": "obp/policy/offline.py", "diff": "@@ -271,9 +271,8 @@ class IPWLearner(BaseOfflinePolicyLearner):\nAction sampled by a trained classifier.\n\"\"\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2-dimensional ndarray\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\ncheck_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\nn_rounds = context.shape[0]\n" }, { "change_type": "MODIFY", "old_path": "test/policy/test_offline.py", "new_path": "test/policy/test_offline.py", "diff": "@@ -110,8 +110,35 @@ def test_opl_predict():\nlearner = IPWLearner(n_actions=2, len_list=1)\nlearner.fit(context=context, action=action, reward=reward, position=position)\n- context_predict = np.array([i for i in range(10)]).reshape(5, 2)\n- action_dist = learner.predict(context=context_predict)\n+ context_test = np.array([i for i in range(10)]).reshape(5, 2)\n+ action_dist = learner.predict(context=context_test)\nassert action_dist.shape[0] == 5\nassert action_dist.shape[1] == n_actions\nassert action_dist.shape[2] == len_list\n+\n+\n+def test_sample_action():\n+ n_actions = 2\n+ len_list = 1\n+ context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)\n+ action = np.array([0, 1])\n+ reward = np.array([1.0, 0.0])\n+ position = np.array([0, 0])\n+ learner = IPWLearner(n_actions=n_actions, len_list=len_list)\n+ learner.fit(context=context, action=action, reward=reward, position=position)\n+\n+ with pytest.raises(ValueError):\n+ invalid_type_context = [1.0, 2.0]\n+ learner.sample_action(context=invalid_type_context)\n+\n+ with pytest.raises(ValueError):\n+ invalid_ndim_context = np.array([1.0, 2.0, 3.0, 4.0])\n+ learner.sample_action(context=invalid_ndim_context)\n+\n+ context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)\n+ n_rounds = context.shape[0]\n+ sampled_action = learner.sample_action(context=context)\n+\n+ assert sampled_action.shape[0] == n_rounds\n+ assert sampled_action.shape[1] == n_actions\n+ assert sampled_action.shape[2] == len_list\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add validation for sample_action
641,006
24.01.2021 18:28:58
-32,400
1c7cc5d01ed63b2fb839794a84dc71e7be084b77
fix reward scaling method of fixed_synthetic_bandit_feedback
[ { "change_type": "MODIFY", "old_path": "tests/ope/conftest.py", "new_path": "tests/ope/conftest.py", "diff": "@@ -13,6 +13,7 @@ from obp.dataset import (\nlogistic_reward_function,\nlinear_behavior_policy,\n)\n+from obp.utils import sigmoid\n@dataclass\n@@ -94,8 +95,11 @@ def fixed_synthetic_bandit_feedback(synthetic_bandit_feedback) -> BanditFeedback\nrandom_ = check_random_state(random_state)\n# copy synthetic bandit feedback\nbandit_feedback = copy.deepcopy(synthetic_bandit_feedback)\n- # expected reward would be about 0.6%, which is close to that of ZOZO dataset\n- bandit_feedback[\"expected_reward\"] = bandit_feedback[\"expected_reward\"] * 0.01\n+ # expected reward would be about 0.65%, which is close to that of ZOZO dataset\n+ logit = np.log(\n+ bandit_feedback[\"expected_reward\"] / (1 - bandit_feedback[\"expected_reward\"])\n+ )\n+ bandit_feedback[\"expected_reward\"] = sigmoid(logit - 4.0)\nexpected_reward_factual = bandit_feedback[\"expected_reward\"][\nnp.arange(bandit_feedback[\"n_rounds\"]), bandit_feedback[\"action\"]\n]\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -50,7 +50,7 @@ def test_performance_of_binary_outcome_models(\nf\"gt_mean: {gt_mean}, {ci_times} * gt_std / sqrt({ground_truth_policy_value.shape[0]}): {ci_bound}\"\n)\n# check the performance of regression models using doubly robust criteria (|\\hat{q} - q| <= |q| is satisfied with high probability)\n- dr_criteria_pass_rate = 0.9\n+ dr_criteria_pass_rate = 0.8\nfit_methods = [\"normal\", \"iw\", \"mrdr\"]\nfor fit_method in fit_methods:\nfor model_name, model in binary_model_dict.items():\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix reward scaling method of fixed_synthetic_bandit_feedback
641,006
24.01.2021 18:42:49
-32,400
1de0b535fc7c66369437a1d365d554d1bfc696ca
fix self-normalized tests
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_dr_estimators.py", "new_path": "tests/ope/test_dr_estimators.py", "diff": "@@ -89,14 +89,10 @@ def test_sndr_range_using_random_evaluation_policy(\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n# make pscore too small (to check the normalization effect)\ninput_dict[\"pscore\"] = input_dict[\"pscore\"] ** 3\n- estimated_policy_value = dr.estimate_policy_value(**input_dict)\n- assert (\n- estimated_policy_value > 1\n- ), f\"estimated policy value of dr should be greater than 1 when pscore is too small, but {estimated_policy_value}\"\nestimated_policy_value = sndr.estimate_policy_value(**input_dict)\nassert (\n- estimated_policy_value <= 1\n- ), f\"estimated policy value of sndr should not be greater than 1 even if pscore is too small, but {estimated_policy_value}\"\n+ estimated_policy_value <= 2\n+ ), f\"estimated policy value of sndr should not be greater than 2 even if pscore is too small, but {estimated_policy_value}\"\ndef test_dr_shrinkage_using_random_evaluation_policy(\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_ipw_estimators.py", "new_path": "tests/ope/test_ipw_estimators.py", "diff": "@@ -68,10 +68,6 @@ def test_snipw_range_using_random_evaluation_policy(\ninput_dict[\"action_dist\"] = action_dist\n# make pscore too small (to check the normalization effect)\ninput_dict[\"pscore\"] = input_dict[\"pscore\"] ** 3\n- estimated_policy_value = ipw.estimate_policy_value(**input_dict)\n- assert (\n- estimated_policy_value > 1\n- ), f\"estimated policy value of ipw should be greater than 1 when pscore is too small, but {estimated_policy_value}\"\nestimated_policy_value = snipw.estimate_policy_value(**input_dict)\nassert (\nestimated_policy_value <= 1\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix self-normalized tests
641,006
24.01.2021 18:47:07
-32,400
4fd49b762374d80ede53cd122ca7771f25fb6c8c
removed unnecessary processes
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_ipw_estimators.py", "new_path": "tests/ope/test_ipw_estimators.py", "diff": "@@ -57,7 +57,6 @@ def test_snipw_range_using_random_evaluation_policy(\n\"\"\"\naction_dist = random_action_dist\n# prepare dm\n- ipw = InverseProbabilityWeighting()\nsnipw = SelfNormalizedInverseProbabilityWeighting()\n# prepare input dict\ninput_dict = {\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -40,15 +40,10 @@ def test_performance_of_binary_outcome_models(\n)\n# compute statistics of ground truth policy value\ngt_mean = ground_truth_policy_value.mean()\n- gt_std = ground_truth_policy_value.std(ddof=1)\nrandom_state = 12345\nauc_scores: Dict[str, float] = {}\n# check ground truth\n- ci_times = 5\n- ci_bound = gt_std * ci_times / np.sqrt(ground_truth_policy_value.shape[0])\n- print(\n- f\"gt_mean: {gt_mean}, {ci_times} * gt_std / sqrt({ground_truth_policy_value.shape[0]}): {ci_bound}\"\n- )\n+ print(f\"gt_mean: {gt_mean}\")\n# check the performance of regression models using doubly robust criteria (|\\hat{q} - q| <= |q| is satisfied with high probability)\ndr_criteria_pass_rate = 0.8\nfit_methods = [\"normal\", \"iw\", \"mrdr\"]\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
removed unnecessary processes
641,006
24.01.2021 19:03:06
-32,400
2aa1e0995160f3a75a40d090f640bf921050eb9b
calculate relative-ee in our testing function
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_meta.py", "new_path": "tests/ope/test_meta.py", "diff": "@@ -314,19 +314,24 @@ def test_meta_summarize_off_policy_estimates(\ndef test_meta_evaluate_performance_of_estimators(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n+ gt = 0.5\n+ # calculate relative-ee\n+ eval_metric_ope_dict = {\n+ \"ipw\": np.abs((mock_policy_value + ipw.eps - gt) / gt),\n+ \"ipw3\": np.abs((mock_policy_value + ipw3.eps - gt) / gt),\n+ }\n+ # check performance estimators\nope_ = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw, ipw3]\n)\n- # check performance format\n- gt = 0.5\nperformance = ope_.evaluate_performance_of_estimators(\nground_truth_policy_value=gt,\naction_dist=random_action_dist,\nmetric=\"relative-ee\",\n)\n- assert set(performance.keys()) == set(\n- [ipw.estimator_name, ipw3.estimator_name]\n- ), \"Invalid key of performance response\"\n+ for k, v in performance.items():\n+ assert k in eval_metric_ope_dict, \"Invalid key of performance response\"\n+ assert v == eval_metric_ope_dict[k], \"Invalid value of performance response\"\n# zero division error when using relative-ee\nwith pytest.raises(ZeroDivisionError, match=r\"float division by zero\"):\n_ = ope_.evaluate_performance_of_estimators(\n@@ -341,5 +346,5 @@ def test_meta_evaluate_performance_of_estimators(\nmetric=\"relative-ee\",\n)\nassert_frame_equal(\n- performance_df, pd.DataFrame(performance, index=[\"relative-ee\"]).T\n+ performance_df, pd.DataFrame(eval_metric_ope_dict, index=[\"relative-ee\"]).T\n), \"Invalid summarization (performance)\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
calculate relative-ee in our testing function
641,006
24.01.2021 19:18:53
-32,400
9bf6fe852b604b345ec2b1ea46e5a28f7fe149cd
fix the naming of variables and functions
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_all_estimators.py", "new_path": "tests/ope/test_all_estimators.py", "diff": "@@ -29,7 +29,7 @@ def test_fixture(\n), \"model list length of logistic evaluation policy should be the same as n_actions\"\n-def test_expected_value_of_random_evaluation_policy(\n+def test_performance_of_ope_estimators_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n@@ -40,12 +40,10 @@ def test_expected_value_of_random_evaluation_policy(\n)\naction_dist = random_action_dist\n# compute ground truth policy value using expected reward\n- ground_truth_policy_value = np.average(\n- expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1\n- )\n+ q_pi_e = np.average(expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1)\n# compute statistics of ground truth policy value\n- gt_mean = ground_truth_policy_value.mean()\n- gt_std = ground_truth_policy_value.std(ddof=1)\n+ gt_mean = q_pi_e.mean()\n+ gt_std = q_pi_e.std(ddof=1)\n# extract most of all estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\nall_estimators = ope.__all_estimators__\nestimators = [\n@@ -61,7 +59,7 @@ def test_expected_value_of_random_evaluation_policy(\naction_dist=action_dist, estimated_rewards_by_reg_model=expected_reward\n)\n# check the performance of OPE\n- ci_bound = gt_std * 3 / np.sqrt(ground_truth_policy_value.shape[0])\n+ ci_bound = gt_std * 3 / np.sqrt(q_pi_e.shape[0])\nprint(f\"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}\")\nfor key in estimated_policy_value:\nprint(\n@@ -73,7 +71,7 @@ def test_expected_value_of_random_evaluation_policy(\n), f\"OPE of {key} did not work well (absolute error is greator than 3*sigma)\"\n-def test_response_format_using_random_evaluation_policy(\n+def test_response_format_of_ope_estimators_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_dm_estimators.py", "new_path": "tests/ope/test_dm_estimators.py", "diff": "@@ -18,11 +18,9 @@ def test_dm_using_random_evaluation_policy(\n)\naction_dist = random_action_dist\n# compute ground truth policy value using expected reward\n- ground_truth_policy_value = np.average(\n- expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1\n- )\n+ q_pi_e = np.average(expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1)\n# compute statistics of ground truth policy value\n- gt_mean = ground_truth_policy_value.mean()\n+ gt_mean = q_pi_e.mean()\n# prepare dm\ndm = DirectMethod()\n# prepare input dict\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_dr_estimators.py", "new_path": "tests/ope/test_dr_estimators.py", "diff": "@@ -22,12 +22,12 @@ dr = DoublyRobust()\ndr_shrink_0 = DoublyRobustWithShrinkage(lambda_=0)\ndr_shrink_max = DoublyRobustWithShrinkage(lambda_=1e10)\nsndr = SelfNormalizedDoublyRobust()\n-swipw_0 = SwitchInverseProbabilityWeighting(tau=0)\n-swipw_max = SwitchInverseProbabilityWeighting(tau=1e10)\n-swdr_0 = SwitchDoublyRobust(tau=0)\n-swdr_max = SwitchDoublyRobust(tau=1e10)\n+switch_ipw_0 = SwitchInverseProbabilityWeighting(tau=0)\n+switch_ipw_max = SwitchInverseProbabilityWeighting(tau=1e10)\n+switch_dr_0 = SwitchDoublyRobust(tau=0)\n+switch_dr_max = SwitchDoublyRobust(tau=1e10)\n-dr_estimators = [dr, dr_shrink_0, sndr, swipw_0, 
swdr_0]\n+dr_estimators = [dr, dr_shrink_0, sndr, switch_ipw_0, switch_dr_0]\ndef test_dr_using_random_evaluation_policy(\n@@ -69,7 +69,7 @@ def test_dr_using_random_evaluation_policy(\n_ = estimator.estimate_policy_value(**input_dict)\n-def test_sndr_range_using_random_evaluation_policy(\n+def test_boundedness_of_sndr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n@@ -125,11 +125,11 @@ def test_dr_shrinkage_using_random_evaluation_policy(\n), \"DoublyRobustWithShrinkage (lambda=inf) should be almost the same as DoublyRobust\"\n-def test_swipw_using_random_evaluation_policy(\n+def test_switch_ipw_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the swipw shrinkage estimators using synthetic bandit data and random evaluation policy\n+ Test the switch_ipw shrinkage estimators using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n@@ -145,21 +145,21 @@ def test_swipw_using_random_evaluation_policy(\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\ndm_value = dm.estimate_policy_value(**input_dict)\nipw_value = ipw.estimate_policy_value(**input_dict)\n- swipw_0_value = swipw_0.estimate_policy_value(**input_dict)\n- swipw_max_value = swipw_max.estimate_policy_value(**input_dict)\n+ switch_ipw_0_value = switch_ipw_0.estimate_policy_value(**input_dict)\n+ switch_ipw_max_value = switch_ipw_max.estimate_policy_value(**input_dict)\nassert (\n- dm_value == swipw_0_value\n+ dm_value == switch_ipw_0_value\n), \"SwitchIPW (tau=0) should be the same as DirectMethod\"\nassert (\n- ipw_value == swipw_max_value\n+ ipw_value == switch_ipw_max_value\n), \"SwitchIPW (tau=1e10) should be the same as IPW\"\n-def test_swdr_using_random_evaluation_policy(\n+def test_switch_dr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the dr swdr using synthetic bandit data and random evaluation policy\n+ Test the dr switch_dr using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n@@ -175,11 +175,11 @@ def test_swdr_using_random_evaluation_policy(\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\ndm_value = dm.estimate_policy_value(**input_dict)\ndr_value = dr.estimate_policy_value(**input_dict)\n- swdr_0_value = swdr_0.estimate_policy_value(**input_dict)\n- swdr_max_value = swdr_max.estimate_policy_value(**input_dict)\n+ switch_dr_0_value = switch_dr_0.estimate_policy_value(**input_dict)\n+ switch_dr_max_value = switch_dr_max.estimate_policy_value(**input_dict)\nassert (\n- dm_value == swdr_0_value\n+ dm_value == switch_dr_0_value\n), \"SwitchDR (tau=0) should be the same as DirectMethod\"\nassert (\n- dr_value == swdr_max_value\n+ dr_value == switch_dr_max_value\n), \"SwitchDR (tau=1e10) should be the same as DoublyRobust\"\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_ipw_estimators.py", "new_path": "tests/ope/test_ipw_estimators.py", "diff": "@@ -49,7 +49,7 @@ def test_ipw_using_random_evaluation_policy(\n_ = estimator.estimate_policy_value(**input_dict)\n-def test_snipw_range_using_random_evaluation_policy(\n+def test_boundedness_of_snipw_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, 
random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -35,11 +35,9 @@ def test_performance_of_binary_outcome_models(\nexpected_reward = np.expand_dims(bandit_feedback[\"expected_reward\"], axis=-1)\naction_dist = random_action_dist\n# compute ground truth policy value using expected reward\n- ground_truth_policy_value = np.average(\n- expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1\n- )\n+ q_pi_e = np.average(expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1)\n# compute statistics of ground truth policy value\n- gt_mean = ground_truth_policy_value.mean()\n+ gt_mean = q_pi_e.mean()\nrandom_state = 12345\nauc_scores: Dict[str, float] = {}\n# check ground truth\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix the naming of variables and functions
641,006
24.01.2021 19:54:56
-32,400
c98b0d60d1814b2306212804c9efd83c51c7dd22
fix comment and typo
[ { "change_type": "MODIFY", "old_path": "tests/ope/conftest.py", "new_path": "tests/ope/conftest.py", "diff": "@@ -87,7 +87,7 @@ def synthetic_bandit_feedback() -> BanditFeedback:\nreturn bandit_feedback\n-# adjust expected reward of synthetic bandit feedback\n+# make the expected reward of synthetic bandit feedback close to that of the Open Bandit Dataset\[email protected](scope=\"session\")\ndef fixed_synthetic_bandit_feedback(synthetic_bandit_feedback) -> BanditFeedback:\n# set random\n@@ -142,7 +142,7 @@ def expected_reward_0() -> np.ndarray:\n)\n-# evaluation policy of logistic model\n+# logistic evaluation policy\[email protected](scope=\"session\")\ndef logistic_evaluation_policy(synthetic_bandit_feedback) -> LogisticEpsilonGreedy:\nrandom_state = 12345\n@@ -163,13 +163,13 @@ def logistic_evaluation_policy(synthetic_bandit_feedback) -> LogisticEpsilonGree\nreturn evaluation_policy\n-# evaluation policy of logistic model\n+# logistic evaluation policy\[email protected](scope=\"session\")\ndef logistic_batch_action_dist(logistic_evaluation_policy) -> np.ndarray:\nreturn np.array([1])\n-# evaluation policy of random model\n+# random evaluation policy\[email protected](scope=\"session\")\ndef random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\nn_actions = synthetic_bandit_feedback[\"n_actions\"]\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_all_estimators.py", "new_path": "tests/ope/test_all_estimators.py", "diff": "@@ -44,7 +44,7 @@ def test_performance_of_ope_estimators_using_random_evaluation_policy(\n# compute statistics of ground truth policy value\ngt_mean = q_pi_e.mean()\ngt_std = q_pi_e.std(ddof=1)\n- # extract most of all estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\nall_estimators = ope.__all_estimators__\nestimators = [\ngetattr(ope.estimators, estimator_name)()\n@@ -81,7 +81,7 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n)\naction_dist = random_action_dist\n- # extract most of all estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\nall_estimators = ope.__all_estimators__\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n@@ -98,11 +98,11 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\n)\n# check the format of OPE\nfor key in estimated_policy_value:\n- # check key of confidence intervals\n+ # check the keys of the output dictionary of the estimate_intervals method\nassert set(estimated_intervals[key].keys()) == set(\n[\"mean\", \"95.0% CI (lower)\", \"95.0% CI (upper)\"]\n), f\"Confidence interval of {key} has invalid keys\"\n- # check the relationship between mean and confidence interval\n+ # check the relationship between the means and the confidence bounds estimated by OPE estimators\nassert (\nestimated_intervals[key][\"95.0% CI (lower)\"] <= estimated_policy_value[key]\n) and (\n" }, { "change_type": "MODIFY", "old_path": 
"tests/ope/test_dm_estimators.py", "new_path": "tests/ope/test_dm_estimators.py", "diff": "@@ -11,7 +11,7 @@ def test_dm_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the performance of dm-like estimators using synthetic bandit data and random evaluation policy\n+ Test the performance of the direct method using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n@@ -44,12 +44,12 @@ def test_dm_using_random_evaluation_policy(\nestimated_policy_value = dm.estimate_policy_value(**input_dict)\nassert (\ngt_mean == estimated_policy_value\n- ), \"DM should return gt mean when action_dist and reward function are both true\"\n- # remove unused keys\n+ ), \"DM should be perfect when the regression model is perfect\"\n+ # remove unnecessary keys\ndel input_dict[\"reward\"]\ndel input_dict[\"pscore\"]\ndel input_dict[\"action\"]\nestimated_policy_value = dm.estimate_policy_value(**input_dict)\nassert (\ngt_mean == estimated_policy_value\n- ), \"DM should return gt mean when action_dist and reward function are both true\"\n+ ), \"DM should be perfect when the regression model is perfect\"\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_dr_estimators.py", "new_path": "tests/ope/test_dr_estimators.py", "diff": "@@ -34,7 +34,7 @@ def test_dr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the format of dr-like estimators using synthetic bandit data and random evaluation policy\n+ Test the format of dr variants using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n@@ -48,13 +48,13 @@ def test_dr_using_random_evaluation_policy(\n}\ninput_dict[\"action_dist\"] = action_dist\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n- # dr estimtors requires all arguments\n+ # dr estimtors require all arguments\nfor estimator in dr_estimators:\nestimated_policy_value = estimator.estimate_policy_value(**input_dict)\nassert isinstance(\nestimated_policy_value, float\n), f\"invalid type response: {estimator}\"\n- # remove used keys\n+ # remove necessary keys\ndel input_dict[\"reward\"]\ndel input_dict[\"pscore\"]\ndel input_dict[\"action\"]\n@@ -73,7 +73,7 @@ def test_boundedness_of_sndr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the range of sndr estimators using synthetic bandit data and random evaluation policy\n+ Test the boundedness of sndr estimators using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n@@ -87,12 +87,12 @@ def test_boundedness_of_sndr_using_random_evaluation_policy(\n}\ninput_dict[\"action_dist\"] = action_dist\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n- # make pscore too small (to check the normalization effect)\n+ # make pscore too small (to check the boundedness of sndr)\ninput_dict[\"pscore\"] = input_dict[\"pscore\"] ** 3\nestimated_policy_value = sndr.estimate_policy_value(**input_dict)\nassert (\nestimated_policy_value <= 2\n- ), f\"estimated policy value of sndr should not be greater than 2 even if pscore is too small, but {estimated_policy_value}\"\n+ ), f\"estimated 
policy value of sndr should be smaller than or equal to 2 (because of its 2-boundedness), but the value is: {estimated_policy_value}\"\ndef test_dr_shrinkage_using_random_evaluation_policy(\n@@ -129,7 +129,7 @@ def test_switch_ipw_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the switch_ipw shrinkage estimators using synthetic bandit data and random evaluation policy\n+ Test the switch_ipw estimators using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n@@ -159,7 +159,7 @@ def test_switch_dr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the dr switch_dr using synthetic bandit data and random evaluation policy\n+ Test the switch_dr using synthetic bandit data and random evaluation policy\n\"\"\"\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_ipw_estimators.py", "new_path": "tests/ope/test_ipw_estimators.py", "diff": "@@ -19,7 +19,7 @@ def test_ipw_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the format of ipw-like estimators using synthetic bandit data and random evaluation policy\n+ Test the format of ipw variants using synthetic bandit data and random evaluation policy\n\"\"\"\naction_dist = random_action_dist\n# prepare input dict\n@@ -35,7 +35,7 @@ def test_ipw_using_random_evaluation_policy(\nassert isinstance(\nestimated_policy_value, float\n), f\"invalid type response: {estimator}\"\n- # remove used keys\n+ # remove necessary keys\ndel input_dict[\"reward\"]\ndel input_dict[\"pscore\"]\ndel input_dict[\"action\"]\n@@ -53,10 +53,10 @@ def test_boundedness_of_snipw_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n- Test the range of snipw estimators using synthetic bandit data and random evaluation policy\n+ Test the boundedness of snipw estimators using synthetic bandit data and random evaluation policy\n\"\"\"\naction_dist = random_action_dist\n- # prepare dm\n+ # prepare snipw\nsnipw = SelfNormalizedInverseProbabilityWeighting()\n# prepare input dict\ninput_dict = {\n@@ -65,9 +65,9 @@ def test_boundedness_of_snipw_using_random_evaluation_policy(\nif k in [\"reward\", \"action\", \"pscore\", \"position\"]\n}\ninput_dict[\"action_dist\"] = action_dist\n- # make pscore too small (to check the normalization effect)\n+ # make pscore too small (to check the boundedness of snipw)\ninput_dict[\"pscore\"] = input_dict[\"pscore\"] ** 3\nestimated_policy_value = snipw.estimate_policy_value(**input_dict)\nassert (\nestimated_policy_value <= 1\n- ), f\"estimated policy value of snipw should not be greater than 1 even if pscore is too small, but {estimated_policy_value}\"\n+ ), f\"estimated policy value of snipw should be smaller than or equal to 1 (because of its 1-boundedness), but the value is: {estimated_policy_value}\"\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_meta.py", "new_path": "tests/ope/test_meta.py", "diff": "@@ -219,10 +219,10 @@ def test_meta_estimation_format(\n)\nassert ope_.estimate_policy_values(random_action_dist) == {\n\"dm\": mock_policy_value\n- }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod]) 
returns wrong value\"\n+ }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod]) returns a wrong value\"\nassert ope_.estimate_intervals(random_action_dist) == {\n\"dm\": mock_confidence_interval\n- }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns wrong value\"\n+ }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns a wrong value\"\nwith pytest.raises(AssertionError, match=r\"action_dist must be 3-dimensional.*\"):\nope_.estimate_policy_values(\nrandom_action_dist[:, :, 0]\n@@ -234,11 +234,11 @@ def test_meta_estimation_format(\nassert ope_.estimate_policy_values(random_action_dist) == {\n\"dm\": mock_policy_value,\n\"ipw\": mock_policy_value + ipw.eps,\n- }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod, IPW]) returns wrong value\"\n+ }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod, IPW]) returns a wrong value\"\nassert ope_.estimate_intervals(random_action_dist) == {\n\"dm\": mock_confidence_interval,\n\"ipw\": {k: v + ipw.eps for k, v in mock_confidence_interval.items()},\n- }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns wrong value\"\n+ }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns a wrong value\"\ndef test_meta_post_init_format(\n@@ -251,7 +251,7 @@ def test_meta_post_init_format(\nope_ = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw, ipw2]\n)\n- assert ope_.ope_estimators_ == {\"ipw\": ipw2}, \"__post_init__ returns wrong value\"\n+ assert ope_.ope_estimators_ == {\"ipw\": ipw2}, \"__post_init__ returns a wrong value\"\n# __post_init__ can handle the same estimator if the estimator names are different\nope_ = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw, ipw3]\n@@ -259,7 +259,7 @@ def test_meta_post_init_format(\nassert ope_.ope_estimators_ == {\n\"ipw\": ipw,\n\"ipw3\": ipw3,\n- }, \"__post_init__ returns wrong value\"\n+ }, \"__post_init__ returns a wrong value\"\ndef test_meta_create_estimator_inputs_format(\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -42,7 +42,7 @@ def test_performance_of_binary_outcome_models(\nauc_scores: Dict[str, float] = {}\n# check ground truth\nprint(f\"gt_mean: {gt_mean}\")\n- # check the performance of regression models using doubly robust criteria (|\\hat{q} - q| <= |q| is satisfied with high probability)\n+ # check the performance of regression models using doubly robust criteria (|\\hat{q} - q| <= |q| is satisfied with a high probability)\ndr_criteria_pass_rate = 0.8\nfit_methods = [\"normal\", \"iw\", \"mrdr\"]\nfor fit_method in fit_methods:\n@@ -92,10 +92,10 @@ def test_performance_of_binary_outcome_models(\n)\nassert (\nnp.mean(dr_criteria <= 0) >= dr_criteria_pass_rate\n- ), f\"Dr criteria should not be larger then 0 with probability {dr_criteria_pass_rate}\"\n+ ), f\" should be satisfied with a probability at least {dr_criteria_pass_rate}\"\nfor model_name in auc_scores:\nprint(f\"AUC of {model_name} is {auc_scores[model_name]}\")\nassert (\nauc_scores[model_name] > 0.5\n- ), f\"AUC of {model_name} should be greator than 0.5\"\n+ ), f\"AUC of {model_name} should be greater than 0.5\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix comment and typo
641,006
24.01.2021 19:57:20
-32,400
8dd5563b6ac93da8eeb689c02d7e0df14378eb49
add random state
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_all_estimators.py", "new_path": "tests/ope/test_all_estimators.py", "diff": "@@ -94,7 +94,9 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\naction_dist=action_dist, estimated_rewards_by_reg_model=expected_reward\n)\nestimated_intervals = ope_instance.estimate_intervals(\n- action_dist=action_dist, estimated_rewards_by_reg_model=expected_reward\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ random_state=12345,\n)\n# check the format of OPE\nfor key in estimated_policy_value:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add random state
641,006
24.01.2021 20:02:28
-32,400
7eb86461a33c6011f3013b82cf2f18fa597acb40
remove logistic evaluation policy
[ { "change_type": "MODIFY", "old_path": "tests/ope/conftest.py", "new_path": "tests/ope/conftest.py", "diff": "-from typing import Set, Tuple, List\n-from dataclasses import dataclass\n+from typing import Set\nimport copy\nimport numpy as np\nimport pytest\nfrom sklearn.utils import check_random_state\n-from obp.policy import Random, LogisticEpsilonGreedy\n+from obp.policy import Random\nfrom obp.types import BanditFeedback\nfrom obp.dataset import (\nSyntheticBanditDataset,\n@@ -16,59 +15,6 @@ from obp.dataset import (\nfrom obp.utils import sigmoid\n-@dataclass\n-class LogisticEpsilonGreedyBatch(LogisticEpsilonGreedy):\n- \"\"\"\n- WIP: Add random action flag and compute_batch_action_dist method to LogisticEpsilonGreedy\n-\n- \"\"\"\n-\n- def select_action(self, context: np.ndarray) -> Tuple[np.ndarray, bool]:\n- \"\"\"Select action for new data.\n-\n- Parameters\n- ----------\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- Returns\n- ----------\n- selected_actions: array-like, shape (len_list, )\n- List of selected actions.\n-\n- random action flag: bool\n- Whether the action is randomly selected\n- \"\"\"\n- if self.random_.rand() > self.epsilon:\n- theta = np.array(\n- [model.predict_proba(context) for model in self.model_list]\n- ).flatten()\n- return theta.argsort()[::-1][: self.len_list], False\n- else:\n- return (\n- self.random_.choice(self.n_actions, size=self.len_list, replace=False),\n- True,\n- )\n-\n- def compute_batch_action_dist(\n- self, context: np.ndarray\n- ) -> Tuple[np.ndarray, List[bool]]:\n- \"\"\"Select action for new data.\n-\n- Parameters\n- ----------\n- context: array-like, shape (n_rounds, dim_context)\n- Observed context matrix.\n-\n- Returns\n- ----------\n- action_dist: array-like, shape (n_rounds, n_actions, len_list)\n- Probability estimates of each arm being the best one for each sample, action, and position.\n-\n- \"\"\"\n- return np.array([1]), [False]\n-\n-\n# generate synthetic dataset using SyntheticBanditDataset\[email protected](scope=\"session\")\ndef synthetic_bandit_feedback() -> BanditFeedback:\n@@ -142,33 +88,6 @@ def expected_reward_0() -> np.ndarray:\n)\n-# logistic evaluation policy\[email protected](scope=\"session\")\n-def logistic_evaluation_policy(synthetic_bandit_feedback) -> LogisticEpsilonGreedy:\n- random_state = 12345\n- epsilon = 0.05\n- dim = synthetic_bandit_feedback[\"context\"].shape[1]\n- n_actions = synthetic_bandit_feedback[\"n_actions\"]\n- evaluation_policy = LogisticEpsilonGreedy(\n- dim=dim,\n- n_actions=n_actions,\n- len_list=synthetic_bandit_feedback[\"position\"].ndim,\n- random_state=random_state,\n- epsilon=epsilon,\n- )\n- # set coef_ of evaluation policy\n- random_ = check_random_state(random_state)\n- for action in range(n_actions):\n- evaluation_policy.model_list[action]._m = random_.uniform(size=dim)\n- return evaluation_policy\n-\n-\n-# logistic evaluation policy\[email protected](scope=\"session\")\n-def logistic_batch_action_dist(logistic_evaluation_policy) -> np.ndarray:\n- return np.array([1])\n-\n-\n# random evaluation policy\[email protected](scope=\"session\")\ndef random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_all_estimators.py", "new_path": "tests/ope/test_all_estimators.py", "diff": "@@ -4,7 +4,6 @@ import numpy as np\nfrom obp import ope\nfrom obp.ope import OffPolicyEvaluation\n-from obp.policy import BaseContextualPolicy\nfrom obp.types import BanditFeedback\n@@ -12,7 +11,6 @@ def 
test_fixture(\nsynthetic_bandit_feedback: BanditFeedback,\nexpected_reward_0: np.ndarray,\nfeedback_key_set: Set[str],\n- logistic_evaluation_policy: BaseContextualPolicy,\nrandom_action_dist: np.ndarray,\n) -> None:\n\"\"\"\n@@ -24,9 +22,6 @@ def test_fixture(\nassert feedback_key_set == set(\nsynthetic_bandit_feedback.keys()\n), f\"Key set of bandit feedback should be {feedback_key_set}, but {synthetic_bandit_feedback.keys()}\"\n- assert synthetic_bandit_feedback[\"n_actions\"] == len(\n- logistic_evaluation_policy.model_list\n- ), \"model list length of logistic evaluation policy should be the same as n_actions\"\ndef test_performance_of_ope_estimators_using_random_evaluation_policy(\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
remove logistic evaluation policy
641,006
24.01.2021 21:18:44
-32,400
5503320589f64af60ec89a5310906e40c74d20d8
add noqa when importing enable_hist_gradient_boosting
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -3,7 +3,7 @@ from pathlib import Path\nimport yaml\nimport numpy as np\n-from sklearn.experimental import enable_hist_gradient_boosting\n+from sklearn.experimental import enable_hist_gradient_boosting # noqa\nfrom sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add noqa when importing enable_hist_gradient_boosting
641,014
28.01.2021 03:58:20
-32,400
4203ad3823d2c09c1a5ef12e529805d48d114a69
fix example file names
[ { "change_type": "MODIFY", "old_path": "examples/README.md", "new_path": "examples/README.md", "diff": "# Examples\n-- `examples_with_obd`: example implementations for evaluating standard off-policy estimators with the small sample Open Bandit Dataset.\n-- `examples_with_synthetic`: example implementations for evaluating several off-policy estimators with synthetic bandit datasets.\n-- `quickstart`: a quickstart guide of Open Bandit Dataset and Pipeline.\n+- `obd/`: example implementations for evaluating standard off-policy estimators with the small sample Open Bandit Dataset.\n+- `synthetic/`: example implementations for evaluating several off-policy estimators with synthetic bandit datasets.\n+- `quickstart/`: some quickstart notebooks to guide the usage of the Open Bandit Pipeline.\n" }, { "change_type": "RENAME", "old_path": "examples/examples_with_obd/README.md", "new_path": "examples/obd/README.md", "diff": "-# Examples with Open Bandit Dataset (OBD)\n+# Example with the Open Bandit Dataset (OBD)\n## Description\n@@ -57,5 +57,5 @@ python evaluate_off_policy_estimators.py\\\n```\nPlease refer to [this page](https://zr-obp.readthedocs.io/en/latest/evaluation_ope.html) for the evaluation of OPE protocol using our real-world data.\n-Please visit [examples_with_synthetic](https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_synthetic) to try the evaluation of OPE estimators with synthetic bandit datasets.\n+Please visit [synthetic](https://github.com/st-tech/zr-obp/tree/master/examples/synthetic) to try the evaluation of OPE estimators with synthetic bandit datasets.\nMoreover, in [benchmark/ope](https://github.com/st-tech/zr-obp/tree/master/benchmark/ope), we performed the benchmark experiments on several OPE estimators using the full size Open Bandit Dataset.\n" }, { "change_type": "RENAME", "old_path": "examples/examples_with_obd/conf/hyperparams.yaml", "new_path": "examples/obd/conf/hyperparams.yaml", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/examples_with_obd/evaluate_off_policy_estimators.py", "new_path": "examples/obd/evaluate_off_policy_estimators.py", "diff": "@@ -139,8 +139,7 @@ if __name__ == \"__main__\":\n)\n# evaluate estimators' performances using relative estimation error (relative-ee)\nope = OffPolicyEvaluation(\n- bandit_feedback=bandit_feedback,\n- ope_estimators=ope_estimators,\n+ bandit_feedback=bandit_feedback, ope_estimators=ope_estimators,\n)\naction_dist = np.tile(\naction_dist_single_round, (bandit_feedback[\"n_rounds\"], 1, 1)\n@@ -153,17 +152,12 @@ if __name__ == \"__main__\":\nreturn relative_ee_b\n- processed = Parallel(\n- backend=\"multiprocessing\",\n- n_jobs=n_jobs,\n- verbose=50,\n- )([delayed(process)(i) for i in np.arange(n_runs)])\n+ processed = Parallel(backend=\"multiprocessing\", n_jobs=n_jobs, verbose=50,)(\n+ [delayed(process)(i) for i in np.arange(n_runs)]\n+ )\nrelative_ee_dict = {est.estimator_name: dict() for est in ope_estimators}\nfor b, relative_ee_b in enumerate(processed):\n- for (\n- estimator_name,\n- relative_ee_,\n- ) in relative_ee_b.items():\n+ for (estimator_name, relative_ee_,) in relative_ee_b.items():\nrelative_ee_dict[estimator_name][b] = relative_ee_\nrelative_ee_df = DataFrame(relative_ee_dict).describe().T.round(6)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "examples/quickstart/README.md", "diff": "+# Quickstart\n+\n+- `obd.ipynb`: a quickstart guide of the Open Bandit Dataset and Pipeline.\n+- `synthetic.ipynb`\" a quickstart guide to implement the standard off-policy 
learning, off-policy evaluation (OPE), and the evaluation of OPE procedures with the Open Bandit Pipeline.\n" }, { "change_type": "RENAME", "old_path": "examples/quickstart/quickstart.ipynb", "new_path": "examples/quickstart/obd.ipynb", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/quickstart/quickstart_synthetic.ipynb", "new_path": "examples/quickstart/synthetic.ipynb", "diff": "\"- (3) Off-Policy Evaluation\\n\",\n\"- (4) Evaluation of OPE Estimators\\n\",\n\"\\n\",\n- \"Please see [https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_synthetic](https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_synthetic) \\n\",\n+ \"Please see [https://github.com/st-tech/zr-obp/tree/master/examples/synthetic](https://github.com/st-tech/zr-obp/tree/master/examples/synthetic) \\n\",\n\"for a more sophisticated example of the evaluation of OPE with synthetic datasets.\"\n]\n},\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Please see [https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_synthetic](https://github.com/st-tech/zr-obp/tree/master/examples/examples_with_synthetic) for a more sophisticated example of the evaluation of OPE with synthetic data.\"\n+ \"Please see [https://github.com/st-tech/zr-obp/tree/master/examples/synthetic](https://github.com/st-tech/zr-obp/tree/master/examples/synthetic) for a more sophisticated example of the evaluation of OPE with synthetic data.\"\n]\n},\n{\n" }, { "change_type": "RENAME", "old_path": "examples/examples_with_synthetic/README.md", "new_path": "examples/synthetic/README.md", "diff": "-# Examples with Synthetic Data\n+# Example with Synthetic Bandit Data\n## Description\n" }, { "change_type": "RENAME", "old_path": "examples/examples_with_synthetic/conf/hyperparams.yaml", "new_path": "examples/synthetic/conf/hyperparams.yaml", "diff": "" }, { "change_type": "RENAME", "old_path": "examples/examples_with_synthetic/evaluate_off_policy_estimators.py", "new_path": "examples/synthetic/evaluate_off_policy_estimators.py", "diff": "@@ -172,8 +172,7 @@ if __name__ == \"__main__\":\n)\n# evaluate estimators' performances using relative estimation error (relative-ee)\nope = OffPolicyEvaluation(\n- bandit_feedback=bandit_feedback_test,\n- ope_estimators=ope_estimators,\n+ bandit_feedback=bandit_feedback_test, ope_estimators=ope_estimators,\n)\nrelative_ee_i = ope.evaluate_performance_of_estimators(\nground_truth_policy_value=ground_truth_policy_value,\n@@ -183,17 +182,12 @@ if __name__ == \"__main__\":\nreturn relative_ee_i\n- processed = Parallel(\n- backend=\"multiprocessing\",\n- n_jobs=n_jobs,\n- verbose=50,\n- )([delayed(process)(i) for i in np.arange(n_runs)])\n+ processed = Parallel(backend=\"multiprocessing\", n_jobs=n_jobs, verbose=50,)(\n+ [delayed(process)(i) for i in np.arange(n_runs)]\n+ )\nrelative_ee_dict = {est.estimator_name: dict() for est in ope_estimators}\nfor i, relative_ee_i in enumerate(processed):\n- for (\n- estimator_name,\n- relative_ee_,\n- ) in relative_ee_i.items():\n+ for (estimator_name, relative_ee_,) in relative_ee_i.items():\nrelative_ee_dict[estimator_name][i] = relative_ee_\nrelative_ee_df = DataFrame(relative_ee_dict).describe().T.round(6)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix example file names
641,014
28.01.2021 23:14:13
-32,400
ec14d30102322fd1cdbcd9e5352b1b43c89f013f
fix synthetic example script
[ { "change_type": "MODIFY", "old_path": "examples/synthetic/README.md", "new_path": "examples/synthetic/README.md", "diff": "@@ -14,7 +14,6 @@ In the following, we evaluate the estimation performances of\n- Self-Normalized Inverse Probability Weighting (SNIPW)\n- Doubly Robust (DR)\n- Self-Normalized Doubly Robust (SNDR)\n-- Switch Inverse Probability Weighting (Switch-IPW)\n- Switch Doubly Robust (Switch-DR)\n- Doubly Robust with Optimistic Shrinkage (DRos)\n@@ -59,23 +58,21 @@ python evaluate_off_policy_estimators.py\\\n--n_jobs -1\\\n--random_state 12345\n-# relative-ee of OPE estimators and their standard deviations (lower is better).\n+# relative-ee of OPE estimators and their standard deviations (lower means accurate).\n# It appears that the performances of some OPE estimators depend on the choice of their hyperparameters.\n# =============================================\n# random_state=12345\n# ---------------------------------------------\n# mean std\n-# dm 0.011110 0.000565\n-# ipw 0.001953 0.000387\n-# snipw 0.002036 0.000835\n-# dr 0.001573 0.000631\n-# sndr 0.001578 0.000625\n-# switch-ipw (tau=1) 0.138523 0.000514\n-# switch-ipw (tau=100) 0.001953 0.000387\n-# switch-dr (tau=1) 0.021875 0.000414\n-# switch-dr (tau=100) 0.001573 0.000631\n-# dr-os (lambda=1) 0.010952 0.000567\n-# dr-os (lambda=100) 0.001835 0.000884\n+# dm 0.180916 0.000650\n+# ipw 0.013690 0.008988\n+# snipw 0.014984 0.006156\n+# dr 0.007802 0.003867\n+# sndr 0.010062 0.002300\n+# switch-dr (tau=1) 0.180916 0.000650\n+# switch-dr (tau=100) 0.007802 0.003867\n+# dr-os (lambda=1) 0.180708 0.000646\n+# dr-os (lambda=100) 0.162749 0.000371\n# =============================================\n```\n" }, { "change_type": "MODIFY", "old_path": "examples/synthetic/evaluate_off_policy_estimators.py", "new_path": "examples/synthetic/evaluate_off_policy_estimators.py", "diff": "@@ -24,7 +24,6 @@ from obp.ope import (\nDoublyRobust,\nSelfNormalizedDoublyRobust,\nSwitchDoublyRobust,\n- SwitchInverseProbabilityWeighting,\nDoublyRobustWithShrinkage,\n)\n@@ -46,8 +45,6 @@ ope_estimators = [\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchInverseProbabilityWeighting(tau=1, estimator_name=\"switch-ipw (tau=1)\"),\n- SwitchInverseProbabilityWeighting(tau=100, estimator_name=\"switch-ipw (tau=100)\"),\nSwitchDoublyRobust(tau=1, estimator_name=\"switch-dr (tau=1)\"),\nSwitchDoublyRobust(tau=100, estimator_name=\"switch-dr (tau=100)\"),\nDoublyRobustWithShrinkage(lambda_=1, estimator_name=\"dr-os (lambda=1)\"),\n@@ -56,7 +53,7 @@ ope_estimators = [\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(\n- description=\"evaluate off-policy estimators with synthetic data.\"\n+ description=\"evaluate off-policy estimators with synthetic bandit data.\"\n)\nparser.add_argument(\n\"--n_runs\", type=int, default=1, help=\"number of simulations in the experiment.\"\n@@ -125,7 +122,6 @@ if __name__ == \"__main__\":\n# define evaluation policy using IPWLearner\nevaluation_policy = IPWLearner(\nn_actions=dataset.n_actions,\n- len_list=dataset.len_list,\nbase_classifier=base_model_dict[base_model_for_evaluation_policy](\n**hyperparams[base_model_for_evaluation_policy]\n),\n@@ -143,9 +139,8 @@ if __name__ == \"__main__\":\npscore=bandit_feedback_train[\"pscore\"],\n)\n# predict the action decisions for the test set of the synthetic logged bandit feedback\n- action_dist = evaluation_policy.predict_proba(\n+ action_dist = 
evaluation_policy.predict(\ncontext=bandit_feedback_test[\"context\"],\n- tau=0.1, # temperature hyperparameter\n)\n# estimate the ground-truth policy values of the evaluation policy\n# using the full expected reward contained in the test set of synthetic bandit feedback\n@@ -157,7 +152,6 @@ if __name__ == \"__main__\":\n# estimate the mean reward function of the test set of synthetic bandit feedback with ML model\nregression_model = RegressionModel(\nn_actions=dataset.n_actions,\n- len_list=dataset.len_list,\naction_context=dataset.action_context,\nbase_model=base_model_dict[base_model_for_reg_model](\n**hyperparams[base_model_for_reg_model]\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix synthetic example script
641,014
28.01.2021 23:17:54
-32,400
610d982c0ab0897737bb01c70b3f67b7abe5b523
fix path of examples
[ { "change_type": "MODIFY", "old_path": "examples/quickstart/synthetic.ipynb", "new_path": "examples/quickstart/synthetic.ipynb", "diff": "\"- (3) Off-Policy Evaluation\\n\",\n\"- (4) Evaluation of OPE Estimators\\n\",\n\"\\n\",\n- \"Please see [examples/synthetic](../examples/synthetic) for a more sophisticated example of the evaluation of OPE with synthetic bandit data.\"\n+ \"Please see [examples/synthetic](../synthetic) for a more sophisticated example of the evaluation of OPE with synthetic bandit data.\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Please see [examples/synthetic](../examples/synthetic) for a more sophisticated example of the evaluation of OPE with synthetic bandit data.\"\n+ \"Please see [examples/synthetic](../synthetic) for a more sophisticated example of the evaluation of OPE with synthetic bandit data.\"\n]\n},\n{\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix path of examples
641,006
31.01.2021 16:13:28
-32,400
b29f150b729f574ad7faaf59bcb858156b87a102
fix flake8 E231
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_meta.py", "new_path": "tests/ope/test_meta.py", "diff": "@@ -295,7 +295,10 @@ def test_meta_summarize_off_policy_estimates(\n)\nvalue, interval = ope_.summarize_off_policy_estimates(random_action_dist)\nexpected_value = pd.DataFrame(\n- {\"ipw\": mock_policy_value + ipw.eps, \"ipw3\": mock_policy_value + ipw3.eps,},\n+ {\n+ \"ipw\": mock_policy_value + ipw.eps,\n+ \"ipw3\": mock_policy_value + ipw3.eps,\n+ },\nindex=[\"estimated_policy_value\"],\n).T\nexpected_interval = pd.DataFrame(\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix flake8 E231
641,003
31.01.2021 19:12:57
-32,400
78b45b09759671b4505d361d6fedf0af7322f153
add test multiclass
[ { "change_type": "MODIFY", "old_path": "obp/dataset/multiclass.py", "new_path": "obp/dataset/multiclass.py", "diff": "@@ -149,12 +149,13 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert is_classifier(\n- self.base_classifier_b\n- ), f\"base_classifier_b must be a classifier\"\n- assert (0.0 <= self.alpha_b < 1.0) and isinstance(\n- self.alpha_b, float\n- ), f\"alpha_b must be a float in the [0,1) interval, but {self.alpha_b} is given\"\n+ if not is_classifier(self.base_classifier_b):\n+ raise ValueError(f\"base_classifier_b must be a classifier\")\n+ if not isinstance(self.alpha_b, float) or not (0.0 <= self.alpha_b < 1.0):\n+ raise ValueError(\n+ f\"alpha_b must be a float in the [0,1) interval, but {self.alpha_b} is given\"\n+ )\n+\nself.X, y = check_X_y(X=self.X, y=self.y, ensure_2d=True, multi_output=False)\nself.y = (rankdata(y, \"dense\") - 1).astype(int) # re-index action\n# fully observed labels\n@@ -282,9 +283,10 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\naxis 2 represents the length of list; it is always 1 in the current implementation.\n\"\"\"\n- assert (0.0 <= alpha_e <= 1.0) and isinstance(\n- alpha_e, float\n- ), f\"alpha_e must be a float in the [0,1] interval, but {alpha_e} is given\"\n+ if not isinstance(alpha_e, float) or not (0.0 <= alpha_e <= 1.0):\n+ raise ValueError(\n+ f\"alpha_e must be a float in the [0,1] interval, but {alpha_e} is given\"\n+ )\n# train a base ML classifier\nif base_classifier_e is None:\nbase_clf_e = clone(self.base_classifier_b)\n@@ -320,10 +322,10 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\npolicy value of a given action distribution (mostly evaluation policy).\n\"\"\"\n- assert action_dist.ndim == 3 and isinstance(\n- action_dist, np.ndarray\n- ), f\"action_dist must be a 3-D np.ndarray\"\n- assert (\n- action_dist.shape[0] == self.n_rounds_ev\n- ), \"the size of axis 0 of action_dist must be the same as the number of samples in the evaluation set\"\n+ if not isinstance(action_dist, np.ndarray) or action_dist.ndim != 3:\n+ raise ValueError(f\"action_dist must be a 3-D np.ndarray\")\n+ if action_dist.shape[0] != self.n_rounds_ev:\n+ raise ValueError(\n+ \"the size of axis 0 of action_dist must be the same as the number of samples in the evaluation set\"\n+ )\nreturn action_dist[np.arange(self.n_rounds_ev), self.y_ev].mean()\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tests/dataset/test_multiclass.py", "diff": "+import pytest\n+import numpy as np\n+from sklearn.datasets import load_digits\n+from sklearn.linear_model import LogisticRegression\n+\n+from typing import Tuple\n+\n+from obp.dataset import MultiClassToBanditReduction\n+\n+\[email protected](scope=\"session\")\n+def raw_data() -> Tuple[np.ndarray, np.ndarray]:\n+ X, y = load_digits(return_X_y=True)\n+ return X, y\n+\n+\n+def test_invalid_initialization(raw_data):\n+ X, y = raw_data\n+\n+ # invalid alpha_b\n+ with pytest.raises(ValueError):\n+ MultiClassToBanditReduction(\n+ X=X, y=y, base_classifier_b=LogisticRegression(), alpha_b=-0.3\n+ )\n+\n+ with pytest.raises(ValueError):\n+ MultiClassToBanditReduction(\n+ X=X, y=y, base_classifier_b=LogisticRegression(), alpha_b=1.3\n+ )\n+\n+ # invalid classifier\n+ with pytest.raises(ValueError):\n+ from sklearn.tree import DecisionTreeRegressor\n+\n+ MultiClassToBanditReduction(X=X, y=y, base_classifier_b=DecisionTreeRegressor)\n+\n+\n+def test_split_train_eval(raw_data):\n+ X, y = 
raw_data\n+\n+ eval_size = 1000\n+ mcbr = MultiClassToBanditReduction(\n+ X=X, y=y, base_classifier_b=LogisticRegression(), alpha_b=0.3\n+ )\n+ mcbr.split_train_eval(eval_size=eval_size)\n+\n+ assert eval_size == mcbr.n_rounds_ev\n+\n+\n+def test_obtain_batch_bandit_feedback(raw_data):\n+ X, y = raw_data\n+\n+ mcbr = MultiClassToBanditReduction(\n+ X=X, y=y, base_classifier_b=LogisticRegression(), alpha_b=0.3\n+ )\n+ mcbr.split_train_eval()\n+ bandit_feedback = mcbr.obtain_batch_bandit_feedback()\n+\n+ assert \"n_actions\" in bandit_feedback.keys()\n+ assert \"n_rounds\" in bandit_feedback.keys()\n+ assert \"context\" in bandit_feedback.keys()\n+ assert \"action\" in bandit_feedback.keys()\n+ assert \"reward\" in bandit_feedback.keys()\n+ assert \"position\" in bandit_feedback.keys()\n+ assert \"pscore\" in bandit_feedback.keys()\n+\n+\n+def test_obtain_action_dist_by_eval_policy(raw_data):\n+ X, y = raw_data\n+\n+ eval_size = 1000\n+ mcbr = MultiClassToBanditReduction(\n+ X=X, y=y, base_classifier_b=LogisticRegression(), alpha_b=0.3\n+ )\n+ mcbr.split_train_eval(eval_size=eval_size)\n+\n+ # invalid alpha_e\n+ with pytest.raises(ValueError):\n+ mcbr.obtain_action_dist_by_eval_policy(alpha_e=-0.3)\n+\n+ with pytest.raises(ValueError):\n+ mcbr.obtain_action_dist_by_eval_policy(alpha_e=1.3)\n+\n+ # valid type\n+ action_dist = mcbr.obtain_action_dist_by_eval_policy()\n+\n+ assert action_dist.shape[0] == eval_size\n+ n_actions = np.unique(y).shape[0]\n+ assert action_dist.shape[1] == n_actions\n+ assert action_dist.shape[2] == 1\n+\n+\n+def test_calc_ground_truth_policy_value(raw_data):\n+ X, y = raw_data\n+\n+ eval_size = 1000\n+ mcbr = MultiClassToBanditReduction(\n+ X=X, y=y, base_classifier_b=LogisticRegression(), alpha_b=0.3\n+ )\n+ mcbr.split_train_eval(eval_size=eval_size)\n+\n+ with pytest.raises(ValueError):\n+ invalid_action_dist = np.zeros(eval_size)\n+ mcbr.calc_ground_truth_policy_value(action_dist=invalid_action_dist)\n+\n+ with pytest.raises(ValueError):\n+ reshaped_action_dist = mcbr.obtain_action_dist_by_eval_policy().reshape(\n+ 1, -1, 1\n+ )\n+ mcbr.calc_ground_truth_policy_value(action_dist=reshaped_action_dist)\n+\n+ action_dist = mcbr.obtain_action_dist_by_eval_policy()\n+ ground_truth_policy_value = mcbr.calc_ground_truth_policy_value(\n+ action_dist=action_dist\n+ )\n+ assert isinstance(ground_truth_policy_value, float)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add test multiclass
641,006
31.01.2021 23:16:11
-32,400
653d43239f55fc716f62e7d1aec7fa1c3da11b20
fix obp modules to test meta.py
[ { "change_type": "MODIFY", "old_path": "obp/ope/meta.py", "new_path": "obp/ope/meta.py", "diff": "@@ -79,7 +79,7 @@ class OffPolicyEvaluation:\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- for key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\n+ for key_ in [\"action\", \"position\", \"reward\", \"pscore\"]:\nif key_ not in self.bandit_feedback:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\nself.ope_estimators_ = dict()\n@@ -87,9 +87,25 @@ class OffPolicyEvaluation:\nself.ope_estimators_[estimator.estimator_name] = estimator\ndef _create_estimator_inputs(\n- self, action_dist: np.ndarray, estimated_rewards_by_reg_model: np.ndarray\n+ self,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n) -> Dict[str, np.ndarray]:\n\"\"\"Create input dictionary to estimate policy value by subclasses of `BaseOffPolicyEstimator`\"\"\"\n+ if not isinstance(action_dist, np.ndarray):\n+ raise ValueError(\"action_dist must be ndarray\")\n+ if action_dist.ndim != 3:\n+ raise ValueError(\n+ f\"action_dist.ndim must be 3-dimensional, but {action_dist.ndim} is given\"\n+ )\n+ if estimated_rewards_by_reg_model is None:\n+ logger.warning(\n+ \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n+ )\n+ elif estimated_rewards_by_reg_model.shape != action_dist.shape:\n+ raise ValueError(\n+ \"estimated_rewards_by_reg_model.shape needs to be equal to action_dist.shape\"\n+ )\nestimator_inputs = {\ninput_: self.bandit_feedback[input_]\nfor input_ in [\"reward\", \"action\", \"position\", \"pscore\"]\n@@ -123,13 +139,6 @@ class OffPolicyEvaluation:\nDictionary containing estimated policy values by OPE estimators.\n\"\"\"\n- assert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n- assert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n- if estimated_rewards_by_reg_model is None:\n- logger.warning(\n- \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n- )\n-\npolicy_value_dict = dict()\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\n@@ -177,13 +186,6 @@ class OffPolicyEvaluation:\nusing a nonparametric bootstrap procedure.\n\"\"\"\n- assert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n- assert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n- if estimated_rewards_by_reg_model is None:\n- logger.warning(\n- \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n- )\n-\npolicy_value_interval_dict = dict()\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\n@@ -233,9 +235,6 @@ class OffPolicyEvaluation:\nEstimated policy values and their confidence intervals by OPE estimators.\n\"\"\"\n- assert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n- assert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n-\npolicy_value_df = DataFrame(\nself.estimate_policy_values(\naction_dist=action_dist,\n@@ -298,16 +297,10 @@ class OffPolicyEvaluation:\nName of the bar figure.\n\"\"\"\n- assert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n- assert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\nif fig_dir is not None:\nassert isinstance(fig_dir, Path), \"fig_dir must be a Path\"\nif fig_name is not None:\nassert isinstance(fig_name, str), \"fig_dir must be a 
string\"\n- if estimated_rewards_by_reg_model is None:\n- logger.warning(\n- \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n- )\nestimated_round_rewards_dict = dict()\nestimator_inputs = self._create_estimator_inputs(\n@@ -392,18 +385,18 @@ class OffPolicyEvaluation:\nDictionary containing evaluation metric for evaluating the estimation performance of OPE estimators.\n\"\"\"\n- assert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n- assert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n- assert isinstance(\n- ground_truth_policy_value, float\n- ), \"ground_truth_policy_value must be a float\"\n- assert metric in [\n- \"relative-ee\",\n- \"se\",\n- ], \"metric must be either 'relative-ee' or 'se'\"\n- if estimated_rewards_by_reg_model is None:\n- logger.warning(\n- \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n+\n+ if not isinstance(ground_truth_policy_value, float):\n+ raise ValueError(\n+ f\"ground_truth_policy_value must be a float, but {ground_truth_policy_value} is given\"\n+ )\n+ if metric not in [\"relative-ee\", \"se\"]:\n+ raise ValueError(\n+ f\"metric must be either 'relative-ee' or 'se', but {metric} is given\"\n+ )\n+ if metric == \"relative-ee\" and ground_truth_policy_value == 0.0:\n+ raise ValueError(\n+ \"ground_truth_policy_value must be non-zero when metric is relative-ee\"\n)\neval_metric_ope_dict = dict()\n@@ -454,13 +447,6 @@ class OffPolicyEvaluation:\nEvaluation metric for evaluating the estimation performance of OPE estimators.\n\"\"\"\n- assert isinstance(action_dist, np.ndarray), \"action_dist must be ndarray\"\n- assert action_dist.ndim == 3, \"action_dist must be 3-dimensional\"\n- assert metric in [\n- \"relative-ee\",\n- \"se\",\n- ], \"metric must be either 'relative-ee' or 'se'\"\n-\neval_metric_ope_df = DataFrame(\nself.evaluate_performance_of_estimators(\nground_truth_policy_value=ground_truth_policy_value,\n" }, { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -11,6 +11,44 @@ from sklearn.utils import check_random_state\nfrom sklearn.utils.validation import _deprecate_positional_args\n+def check_confidence_interval_arguments(\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+) -> None:\n+ \"\"\"Check confidence interval arguments.\n+\n+ Parameters\n+ ----------\n+ alpha: float, default=0.05\n+ Significant level of confidence intervals.\n+\n+ n_bootstrap_samples: int, default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ if not (isinstance(alpha, float) and (0.0 < alpha < 1.0)):\n+ raise ValueError(\n+ f\"alpha must be a positive float (< 1.0), but {alpha} is given\"\n+ )\n+ if not (isinstance(n_bootstrap_samples, int) and n_bootstrap_samples > 0):\n+ raise ValueError(\n+ f\"n_bootstrap_samples must be a positive integer, but {n_bootstrap_samples} is given\"\n+ )\n+ if random_state is not None and not isinstance(random_state, int):\n+ raise ValueError(\n+ f\"random_state must be an integer, but {random_state} is given\"\n+ )\n+\n+\ndef estimate_confidence_interval_by_bootstrap(\nsamples: np.ndarray,\nalpha: float = 
0.05,\n@@ -25,7 +63,7 @@ def estimate_confidence_interval_by_bootstrap(\nEmpirical observed samples to be used to estimate cumulative distribution function.\nalpha: float, default=0.05\n- P-value.\n+ Significant level of confidence intervals.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -39,12 +77,9 @@ def estimate_confidence_interval_by_bootstrap(\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n- assert (0.0 < alpha < 1.0) and isinstance(\n- alpha, float\n- ), f\"alpha must be a positive float, but {alpha} is given\"\n- assert (n_bootstrap_samples > 0) and isinstance(\n- n_bootstrap_samples, int\n- ), f\"n_bootstrap_samples must be a positive integer, but {n_bootstrap_samples} is given\"\n+ check_confidence_interval_arguments(\n+ alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state\n+ )\nboot_samples = list()\nrandom_ = check_random_state(random_state)\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_meta.py", "new_path": "tests/ope/test_meta.py", "diff": "from typing import Dict, Optional\nfrom dataclasses import dataclass\n+import itertools\nimport pytest\nimport numpy as np\n@@ -8,6 +9,7 @@ from pandas.testing import assert_frame_equal\nfrom obp.types import BanditFeedback\nfrom obp.ope import OffPolicyEvaluation, BaseOffPolicyEstimator\n+from obp.utils import check_confidence_interval_arguments\nmock_policy_value = 0.5\n@@ -96,6 +98,11 @@ class DirectMethodMock(BaseOffPolicyEstimator):\nmock_confidence_interval: Dict[str, float]\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ check_confidence_interval_arguments(\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\nreturn mock_confidence_interval\n@@ -207,45 +214,9 @@ ipw2 = InverseProbabilityWeightingMock(eps=0.02)\nipw3 = InverseProbabilityWeightingMock(estimator_name=\"ipw3\")\n-def test_meta_estimation_format(\n- synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n-) -> None:\n- \"\"\"\n- Test the response format of OffPolicyEvaluation\n- \"\"\"\n- # single ope estimator\n- ope_ = OffPolicyEvaluation(\n- bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm]\n- )\n- assert ope_.estimate_policy_values(random_action_dist) == {\n- \"dm\": mock_policy_value\n- }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod]) returns a wrong value\"\n- assert ope_.estimate_intervals(random_action_dist) == {\n- \"dm\": mock_confidence_interval\n- }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns a wrong value\"\n- with pytest.raises(AssertionError, match=r\"action_dist must be 3-dimensional.*\"):\n- ope_.estimate_policy_values(\n- random_action_dist[:, :, 0]\n- ), \"action_dist must be 3-dimensional when using OffPolicyEvaluation\"\n- # multiple ope estimators\n- ope_ = OffPolicyEvaluation(\n- bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm, ipw]\n- )\n- assert ope_.estimate_policy_values(random_action_dist) == {\n- \"dm\": mock_policy_value,\n- \"ipw\": mock_policy_value + ipw.eps,\n- }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod, IPW]) returns a wrong value\"\n- assert ope_.estimate_intervals(random_action_dist) == {\n- \"dm\": mock_confidence_interval,\n- \"ipw\": {k: v + ipw.eps for k, v in mock_confidence_interval.items()},\n- }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns a wrong value\"\n-\n-\n-def 
test_meta_post_init_format(\n- synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n-) -> None:\n+def test_meta_post_init(synthetic_bandit_feedback: BanditFeedback) -> None:\n\"\"\"\n- Test the post init format of OffPolicyEvaluation\n+ Test the __post_init__ function\n\"\"\"\n# __post_init__ saves the latter estimator when the same estimator name is used\nope_ = OffPolicyEvaluation(\n@@ -260,22 +231,115 @@ def test_meta_post_init_format(\n\"ipw\": ipw,\n\"ipw3\": ipw3,\n}, \"__post_init__ returns a wrong value\"\n+ # __post__init__ raises RuntimeError when necessary_keys are not included in the bandit_feedback\n+ necessary_keys = [\"action\", \"position\", \"reward\", \"pscore\"]\n+ for i in range(len(necessary_keys)):\n+ for deleted_keys in itertools.combinations(necessary_keys, i + 1):\n+ invalid_bandit_feedback_dict = {key: \"_\" for key in necessary_keys}\n+ # delete\n+ for k in deleted_keys:\n+ del invalid_bandit_feedback_dict[k]\n+ with pytest.raises(RuntimeError, match=r\"Missing key*\"):\n+ _ = OffPolicyEvaluation(\n+ bandit_feedback=invalid_bandit_feedback_dict, ope_estimators=[ipw]\n+ )\n+\n+# action_dist, estimated_rewards_by_reg_model, description\n+invalid_input_of_create_estimator_inputs = [\n+ (\n+ np.zeros((2, 3, 4)),\n+ np.zeros((2, 3, 3)),\n+ \"estimated_rewards_by_reg_model.shape needs to be equal to action_dist.shape\",\n+ ),\n+ (np.zeros((2, 3)), None, \"action_dist.ndim must be 3-dimensional\"),\n+ (\"3\", None, \"action_dist must be ndarray\"),\n+ (None, None, \"action_dist must be ndarray\"),\n+]\n+\n+valid_input_of_create_estimator_inputs = [\n+ (\n+ np.zeros((2, 3, 4)),\n+ np.zeros((2, 3, 4)),\n+ \"same shape\",\n+ ),\n+ (np.zeros((2, 3, 1)), None, \"estimated_rewards_by_reg_model is None\"),\n+]\n-def test_meta_create_estimator_inputs_format(\n- synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description\",\n+ invalid_input_of_create_estimator_inputs,\n+)\n+def test_meta_create_estimator_inputs_using_invalid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n) -> None:\n\"\"\"\n- Test the _create_estimator_inputs format of OffPolicyEvaluation\n+ Test the _create_estimator_inputs using valid data\n\"\"\"\n- # __post_init__ saves the latter estimator when the same estimator name is used\nope_ = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw]\n)\n- inputs = ope_._create_estimator_inputs(\n- action_dist=None, estimated_rewards_by_reg_model=None\n+ # raise ValueError when the shape of two arrays are different\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_._create_estimator_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ # _create_estimator_inputs function is called in other functions\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.estimate_policy_values(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.estimate_intervals(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.summarize_off_policy_estimates(\n+ action_dist=action_dist,\n+ 
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\n- assert set(inputs.keys()) == set(\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=0.1,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=0.1,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description\",\n+ valid_input_of_create_estimator_inputs,\n+)\n+def test_meta_create_estimator_inputs_using_valid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the _create_estimator_inputs using invalid data\n+ \"\"\"\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw]\n+ )\n+ estimator_inputs = ope_._create_estimator_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ assert set(estimator_inputs.keys()) == set(\n[\n\"reward\",\n\"action\",\n@@ -284,16 +348,208 @@ def test_meta_create_estimator_inputs_format(\n\"action_dist\",\n\"estimated_rewards_by_reg_model\",\n]\n- ), \"Invalid response format of _create_estimator_inputs\"\n+ ), f\"Invalid response of _create_estimator_inputs (test case: {description})\"\n+ # _create_estimator_inputs function is called in other functions\n+ _ = ope_.estimate_policy_values(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.estimate_intervals(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.summarize_off_policy_estimates(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=0.1,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=0.1,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description\",\n+ valid_input_of_create_estimator_inputs,\n+)\n+def test_meta_estimate_policy_values_using_valid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_policy_values using valid data\n+ \"\"\"\n+ # single ope estimator\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm]\n+ )\n+ assert ope_.estimate_policy_values(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ) == {\n+ \"dm\": mock_policy_value\n+ }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod]) returns a wrong value\"\n+ # multiple ope estimators\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm, ipw]\n+ )\n+ assert ope_.estimate_policy_values(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ) == {\n+ \"dm\": mock_policy_value,\n+ 
\"ipw\": mock_policy_value + ipw.eps,\n+ }, \"OffPolicyEvaluation.estimate_policy_values ([DirectMethod, IPW]) returns a wrong value\"\n+\n+\n+# alpha, n_bootstrap_samples, random_state, description\n+invalid_input_of_estimate_intervals = [\n+ (0.05, 100, \"s\", \"random_state must be an integer\"),\n+ (0.05, -1, 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.05, \"s\", 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (1.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (\"0\", 1, 1, \"alpha must be a positive float (< 1)\"),\n+]\n+\n+valid_input_of_estimate_intervals = [\n+ (0.05, 100, 1, \"random_state is 1\"),\n+ (0.05, 1, 1, \"n_bootstrap_samples is 1\"),\n+]\n+\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ invalid_input_of_estimate_intervals,\n+)\n+def test_meta_estimate_intervals_using_invalid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description_2: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_intervals using invalid data\n+ \"\"\"\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm]\n+ )\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = ope_.estimate_intervals(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ # estimate_intervals function is called in summarize_off_policy_estimates\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = ope_.summarize_off_policy_estimates(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_meta_estimate_intervals_using_valid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ alpha: float,\n+ n_bootstrap_samples: int,\n+ random_state: int,\n+ description_2: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_intervals using valid data\n+ \"\"\"\n+ # single ope estimator\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm]\n+ )\n+ assert ope_.estimate_intervals(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ ) == {\n+ \"dm\": mock_confidence_interval\n+ }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod]) returns a wrong value\"\n+ # multiple ope estimators\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm, ipw]\n+ )\n+ assert ope_.estimate_intervals(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ 
n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ ) == {\n+ \"dm\": mock_confidence_interval,\n+ \"ipw\": {k: v + ipw.eps for k, v in mock_confidence_interval.items()},\n+ }, \"OffPolicyEvaluation.estimate_intervals ([DirectMethod, IPW]) returns a wrong value\"\n+\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ valid_input_of_estimate_intervals,\n+)\ndef test_meta_summarize_off_policy_estimates(\n- synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ alpha: float,\n+ n_bootstrap_samples: int,\n+ random_state: int,\n+ description_2: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n) -> None:\n+ \"\"\"\n+ Test the response of summarize_off_policy_estimates using valid data\n+ \"\"\"\nope_ = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw, ipw3]\n)\n- value, interval = ope_.summarize_off_policy_estimates(random_action_dist)\n+ value, interval = ope_.summarize_off_policy_estimates(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\nexpected_value = pd.DataFrame(\n{\n\"ipw\": mock_policy_value + ipw.eps,\n@@ -311,40 +567,120 @@ def test_meta_summarize_off_policy_estimates(\nassert_frame_equal(interval, expected_interval), \"Invalid summarization (interval)\"\n-def test_meta_evaluate_performance_of_estimators(\n- synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n+invalid_input_of_evaluation_performance = [\n+ (\"foo\", 0.3, \"metric must be either 'relative-ee' or 'se'\"),\n+ (\"se\", 1, \"ground_truth_policy_value must be a float\"),\n+ (\"se\", \"a\", \"ground_truth_policy_value must be a float\"),\n+ (\n+ \"relative-ee\",\n+ 0.0,\n+ \"ground_truth_policy_value must be non-zero when metric is relative-ee\",\n+ ),\n+]\n+\n+valid_input_of_evaluation_performance = [\n+ (\"se\", 0.0, \"metric is se and ground_truth_policy_value is 0.0\"),\n+ (\"relative-ee\", 1.0, \"metric is relative-ee and ground_truth_policy_value is 1.0\"),\n+]\n+\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"metric, ground_truth_policy_value, description_2\",\n+ invalid_input_of_evaluation_performance,\n+)\n+def test_meta_evaluate_performance_of_estimators_using_invalid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ metric,\n+ ground_truth_policy_value,\n+ description_2: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n) -> None:\n- gt = 0.5\n+ \"\"\"\n+ Test the response of evaluate_performance_of_estimators using invalid data\n+ \"\"\"\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[dm]\n+ )\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_dist=action_dist,\n+ metric=metric,\n+ )\n+ # estimate_intervals function is called in summarize_off_policy_estimates\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = 
ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_dist=action_dist,\n+ metric=metric,\n+ )\n+\n+\[email protected](\n+ \"action_dist, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"metric, ground_truth_policy_value, description_2\",\n+ valid_input_of_evaluation_performance,\n+)\n+def test_meta_evaluate_performance_of_estimators_using_valid_input_data(\n+ action_dist,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ metric,\n+ ground_truth_policy_value,\n+ description_2: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of evaluate_performance_of_estimators using valid data\n+ \"\"\"\n+ if metric == \"relative-ee\":\n# calculate relative-ee\neval_metric_ope_dict = {\n- \"ipw\": np.abs((mock_policy_value + ipw.eps - gt) / gt),\n- \"ipw3\": np.abs((mock_policy_value + ipw3.eps - gt) / gt),\n+ \"ipw\": np.abs(\n+ (mock_policy_value + ipw.eps - ground_truth_policy_value)\n+ / ground_truth_policy_value\n+ ),\n+ \"ipw3\": np.abs(\n+ (mock_policy_value + ipw3.eps - ground_truth_policy_value)\n+ / ground_truth_policy_value\n+ ),\n+ }\n+ else:\n+ # calculate se\n+ eval_metric_ope_dict = {\n+ \"ipw\": (mock_policy_value + ipw.eps - ground_truth_policy_value) ** 2,\n+ \"ipw3\": (mock_policy_value + ipw3.eps - ground_truth_policy_value) ** 2,\n}\n# check performance estimators\nope_ = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw, ipw3]\n)\nperformance = ope_.evaluate_performance_of_estimators(\n- ground_truth_policy_value=gt,\n- action_dist=random_action_dist,\n- metric=\"relative-ee\",\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_dist=action_dist,\n+ metric=metric,\n)\nfor k, v in performance.items():\nassert k in eval_metric_ope_dict, \"Invalid key of performance response\"\nassert v == eval_metric_ope_dict[k], \"Invalid value of performance response\"\n- # zero division error when using relative-ee\n- with pytest.raises(ZeroDivisionError, match=r\"float division by zero\"):\n- _ = ope_.evaluate_performance_of_estimators(\n- ground_truth_policy_value=0.0,\n- action_dist=random_action_dist,\n- metric=\"relative-ee\",\n- )\n- # check summarization\nperformance_df = ope_.summarize_estimators_comparison(\n- ground_truth_policy_value=gt,\n- action_dist=random_action_dist,\n- metric=\"relative-ee\",\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_dist=action_dist,\n+ metric=metric,\n)\nassert_frame_equal(\n- performance_df, pd.DataFrame(eval_metric_ope_dict, index=[\"relative-ee\"]).T\n+ performance_df, pd.DataFrame(eval_metric_ope_dict, index=[metric]).T\n), \"Invalid summarization (performance)\"\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix obp modules to test meta.py
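As a minimal sketch of the validation behaviour exercised by the tests in the record above (assuming an obp version where OffPolicyEvaluation checks for the keys action, position, reward, and pscore in __post_init__; the dummy feedback dict below is purely illustrative):

import pytest
from obp.ope import OffPolicyEvaluation, InverseProbabilityWeighting

# dummy feedback with one required key deliberately missing
necessary_keys = ["action", "position", "reward", "pscore"]
dummy_feedback = {key: "_" for key in necessary_keys}
del dummy_feedback["pscore"]

with pytest.raises(RuntimeError):
    # __post_init__ is expected to reject feedback that misses a necessary key
    OffPolicyEvaluation(
        bandit_feedback=dummy_feedback,
        ope_estimators=[InverseProbabilityWeighting()],
    )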
641,005
31.01.2021 23:39:29
-32,400
258b683b2286b8d63710a782f945e13a4e328ebc
remove eval_size parameter
[ { "change_type": "MODIFY", "old_path": "obp/dataset/multiclass.py", "new_path": "obp/dataset/multiclass.py", "diff": "@@ -217,10 +217,6 @@ class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\nParameters\n-----------\n- eval_size: float or int, default=0.25\n- If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n- If int, represents the absolute number of test samples.\n-\nrandom_state: int, default=None\nControls the random seed in sampling actions.\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
remove eval_size parameter
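For illustration only (assuming the MultiClassToBanditReduction API of this period, where the evaluation-set size is passed to split_train_eval rather than to the constructor), a rough usage sketch with the scikit-learn digits data:

from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from obp.dataset import MultiClassToBanditReduction

X, y = load_digits(return_X_y=True)
dataset = MultiClassToBanditReduction(
    X=X, y=y, base_classifier_b=LogisticRegression(max_iter=10000)
)
dataset.split_train_eval(eval_size=0.7, random_state=12345)  # eval_size is set here
bandit_feedback = dataset.obtain_batch_bandit_feedback(random_state=12345)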
641,014
01.02.2021 07:32:31
-32,400
096b17d18afc40b2b85c31bf69b1569417bb6b22
del switch-ipw from examples/multiclass
[ { "change_type": "MODIFY", "old_path": "examples/multiclass/README.md", "new_path": "examples/multiclass/README.md", "diff": "@@ -14,11 +14,10 @@ In the following, we evaluate the estimation performances of\n- Self-Normalized Inverse Probability Weighting (SNIPW)\n- Doubly Robust (DR)\n- Self-Normalized Doubly Robust (SNDR)\n-- Switch Inverse Probability Weighting (Switch-IPW)\n- Switch Doubly Robust (Switch-DR)\n- Doubly Robust with Optimistic Shrinkage (DRos)\n-For Switch-IPW, Switch-DR, and DRos, we try some different values of hyperparameters.\n+For Switch-DR and DRos, we try some different values of hyperparameters.\nSee [our documentation](https://zr-obp.readthedocs.io/en/latest/estimators.html) for the details about these estimators.\n### Files\n@@ -77,8 +76,6 @@ python evaluate_off_policy_estimators.py\\\n# snipw 0.006797 0.004094\n# dr 0.007780 0.004492\n# sndr 0.007210 0.004089\n-# switch-ipw (tau=1) 0.255925 0.024125\n-# switch-ipw (tau=100) 0.013286 0.008496\n# switch-dr (tau=1) 0.173282 0.020025\n# switch-dr (tau=100) 0.007780 0.004492\n# dr-os (lambda=1) 0.079629 0.014008\n" }, { "change_type": "MODIFY", "old_path": "examples/multiclass/evaluate_off_policy_estimators.py", "new_path": "examples/multiclass/evaluate_off_policy_estimators.py", "diff": "@@ -20,7 +20,6 @@ from obp.ope import (\nDoublyRobust,\nSelfNormalizedDoublyRobust,\nSwitchDoublyRobust,\n- SwitchInverseProbabilityWeighting,\nDoublyRobustWithShrinkage,\n)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
del switch-ipw from examples/multiclass
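For illustration, the remaining hyperparameterized estimators compared in that example can be instantiated as follows; this is a sketch, with the hyperparameter values mirroring those listed in the README output shown in the record above:

from obp.ope import SwitchDoublyRobust, DoublyRobustWithShrinkage

ope_estimators = [
    SwitchDoublyRobust(tau=1.0, estimator_name="switch-dr (tau=1)"),
    SwitchDoublyRobust(tau=100.0, estimator_name="switch-dr (tau=100)"),
    DoublyRobustWithShrinkage(lambda_=1.0, estimator_name="dr-os (lambda=1)"),
    DoublyRobustWithShrinkage(lambda_=100.0, estimator_name="dr-os (lambda=100)"),
]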
641,014
01.02.2021 08:36:48
-32,400
2e6a7b342fcbf7754321a7b0ebfe9ecc99f35218
add calc_ground_truth_policy_value
[ { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -248,6 +248,39 @@ class SyntheticBanditDataset(BaseSyntheticBanditDataset):\npscore=pscore,\n)\n+ def calc_ground_truth_policy_value(self, expected_reward: np.ndarray, action_dist: np.ndarray) -> float:\n+ \"\"\"Calculate the policy value of given action distribution on the given expected_reward.\n+\n+ Parameters\n+ -----------\n+ expected_reward: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+ This is often the expected_reward of the test set of logged bandit feedback data.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ Returns\n+ ----------\n+ policy_value: float\n+ The policy value of the given action distribution on the given bandit feedback data.\n+\n+ \"\"\"\n+ if not isinstance(expected_reward, np.ndarray):\n+ raise ValueError(\"expected_reward must be ndarray\")\n+ if not isinstance(action_dist, np.ndarray):\n+ raise ValueError(\"action_dist must be ndarray\")\n+ if action_dist.ndim != 3:\n+ raise ValueError(\n+ f\"action_dist must be 3-dimensional, but is {action_dist.ndim}.\"\n+ )\n+ if (expected_reward.shape[0] != action_dist.shape[0]):\n+ raise ValueError(\"the size of axis 0 of expected_reward must be the same as that of action_dist\")\n+ if (expected_reward.shape[1] != action_dist.shape[1]):\n+ raise ValueError(\"the size of axis 1 of expected_reward must be the same as that of action_dist\")\n+\n+ return np.average(expected_reward, weights=action_dist[:, :, 0], axis=1).mean()\n+\ndef logistic_reward_function(\ncontext: np.ndarray, action_context: np.ndarray, random_state: Optional[int] = None,\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add calc_ground_truth_policy_value
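A minimal sketch of how the helper added in the record above might be called (the uniform-random evaluation policy is an assumption chosen only for illustration):

import numpy as np
from obp.dataset import SyntheticBanditDataset

dataset = SyntheticBanditDataset(n_actions=10, random_state=12345)
test_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)

# uniform-random evaluation policy over the 10 actions (len_list = 1)
action_dist = np.full((test_feedback["n_rounds"], dataset.n_actions, 1), 0.1)

policy_value = dataset.calc_ground_truth_policy_value(
    expected_reward=test_feedback["expected_reward"],
    action_dist=action_dist,
)

With a uniform evaluation policy this reduces to the mean of expected_reward over actions and rounds, since the helper averages expected_reward under action_dist[:, :, 0].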
641,014
01.02.2021 08:37:06
-32,400
2a2cc94159a4c67850c14ea90a9892d34d157877
add tests of calc_ground_truth_policy_value
[ { "change_type": "MODIFY", "old_path": "tests/dataset/test_synthetic.py", "new_path": "tests/dataset/test_synthetic.py", "diff": "@@ -90,6 +90,62 @@ def test_synthetic_obtain_batch_bandit_feedback():\n)\n+# expected_reward, action_dist, description\n+invalid_input_of_calc_policy_value = [\n+ (\n+ np.ones((2, 3)),\n+ np.ones((3, 3, 3)),\n+ \"the size of axis 0 of expected_reward must be the same as that of action_dist\",\n+ ),\n+ (\n+ np.ones((2, 3)),\n+ np.ones((2, 2, 3)),\n+ \"the size of axis 1 of expected_reward must be the same as that of action_dist\",\n+ ),\n+ (\"3\", np.ones((2, 2, 3)), \"expected_reward must be ndarray\"),\n+ (None, np.ones((2, 2, 3)), \"expected_reward must be ndarray\"),\n+ (np.ones((2, 3)), np.ones((2, 3)), \"action_dist must be 3-dimensional, but is 2.\"),\n+ (np.ones((2, 3)), \"3\", \"action_dist must be ndarray\"),\n+ (np.ones((2, 3)), None, \"action_dist must be ndarray\"),\n+]\n+\n+valid_input_of_calc_policy_value = [\n+ (np.ones((2, 3)), np.ones((2, 3, 1)), \"valid shape\",),\n+]\n+\n+\[email protected](\n+ \"expected_reward, action_dist, description\", invalid_input_of_calc_policy_value,\n+)\n+def test_synthetic_calc_policy_value_using_invalid_inputs(\n+ expected_reward, action_dist, description,\n+):\n+ n_actions = 10\n+ dataset = SyntheticBanditDataset(n_actions=n_actions)\n+\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dataset.calc_ground_truth_policy_value(\n+ expected_reward=expected_reward, action_dist=action_dist\n+ )\n+\n+\[email protected](\n+ \"expected_reward, action_dist, description\", valid_input_of_calc_policy_value,\n+)\n+def test_synthetic_calc_policy_value_using_valid_inputs(\n+ expected_reward, action_dist, description,\n+):\n+ n_actions = 10\n+ dataset = SyntheticBanditDataset(n_actions=n_actions)\n+\n+ policy_value = dataset.calc_ground_truth_policy_value(\n+ expected_reward=expected_reward, action_dist=action_dist\n+ )\n+ assert isinstance(\n+ policy_value, float\n+ ), \"Invalid response of calc_ground_truth_policy_value\"\n+\n+\ndef test_synthetic_logistic_reward_function():\n# context\nwith pytest.raises(ValueError):\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add tests of calc_ground_truth_policy_value
641,014
02.02.2021 06:17:19
-32,400
6a3656844d796c88d5cbb0893b0475c43f312954
update quickstart obd
[ { "change_type": "MODIFY", "old_path": "examples/quickstart/obd.ipynb", "new_path": "examples/quickstart/obd.ipynb", "diff": "\"source\": [\n\"# Quickstart Example with Open Bandit Pipeline\\n\",\n\"---\\n\",\n- \"This notebook demonstrates an example of conducting OPE of Bernoulli Thompson Sampling (BernoulliTS) as an evaluation policy using some OPE estimators and logged bandit feedback generated by running the Random policy (behavior policy) on the ZOZOTOWN platform. It also implements the data-driven evaluation and comparison of different OPE estimators. Please refer to [the documentation](https://zr-obp.readthedocs.io/en/latest/evaluation_ope.html) for the details about the evaluation of OPE protocol.\\n\",\n+ \"This notebook demonstrates an example of conducting OPE of Bernoulli Thompson Sampling (BernoulliTS) as an evaluation policy using some OPE estimators and logged bandit feedback generated by running the Random policy (behavior policy) on the ZOZOTOWN platform. It also implements the data-driven evaluation and comparison of different OPE estimators. Please clone [the obp repository](https://github.com/st-tech/zr-obp) and download [the small sized Open Bandit Dataset](https://github.com/st-tech/zr-obp/tree/master/obd) to run this notebook.\\n\",\n\"\\n\",\n\"Our example with the Open Bandit Dataset contains the following four major steps:\\n\",\n\"- (1) Data Loading and Preprocessing\\n\",\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
update quickstart obd
641,014
05.02.2021 06:16:21
-32,400
002f3b480458e872eb94a54b4b51535863ad06d7
add example data to obp
[ { "change_type": "MODIFY", "old_path": "MANIFEST.in", "new_path": "MANIFEST.in", "diff": "include *.txt\ninclude *.md\nrecursive-include obp/policy/conf *\n+recursive-include obp/dataset/obd *\ninclude LICENSE\n" }, { "change_type": "MODIFY", "old_path": "setup.py", "new_path": "setup.py", "diff": "@@ -12,7 +12,7 @@ print(__version__)\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\nlong_description = f.read()\n-package_data_list = [\"obp/policy/conf/prior_bts.yaml\"]\n+package_data_list = [\"obp/policy/conf/prior_bts.yaml\", \"obp/dataset/obd/\"]\nsetup(\nname=\"obp\",\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add example data to obp
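As an aside on the packaging change above: recursive-include in MANIFEST.in covers source distributions, while package_data controls what setuptools installs with the package. A generic, hypothetical sketch of wiring such a list into setup() (the project's actual setup() call is not shown in this record, and the glob pattern below is an assumption):

from setuptools import setup, find_packages

setup(
    name="obp",
    packages=find_packages(),
    # paths are relative to the package; bundles the prior YAML and example data files
    package_data={"obp": ["policy/conf/prior_bts.yaml", "dataset/obd/*/*/*.csv"]},
)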
641,014
05.02.2021 06:21:53
-32,400
406eb832bac34a51037a343c1d81ac8b4d72a673
add option to download example obd
[ { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "@@ -34,7 +34,8 @@ class OpenBanditDataset(BaseRealBanditDataset):\nOne of the three possible campaigns considered in ZOZOTOWN, \"all\", \"men\", and \"women\".\ndata_path: Path, default=Path('./obd')\n- Path that stores Open Bandit Dataset.\n+ Path where the Open Bandit Dataset exists.\n+ When `data_path` is not given, this class downloads the example small sized version of the dataset.\ndataset_name: str, default='obd'\nName of the dataset.\n@@ -48,7 +49,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\nbehavior_policy: str\ncampaign: str\n- data_path: Path = Path(\"./obd\")\n+ data_path: Path = Path(__file__).parent / \"obd\"\ndataset_name: str = \"obd\"\ndef __post_init__(self) -> None:\n@@ -62,9 +63,11 @@ class OpenBanditDataset(BaseRealBanditDataset):\n\"men\",\n\"women\",\n], f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\n- assert isinstance(self.data_path, Path), f\"data_path must be a Path type\"\n-\n+ if not isinstance(self.data_path, Path):\n+ raise ValueError(\"data_path must be a Path type\")\nself.data_path = self.data_path / self.behavior_policy / self.campaign\n+\n+ self.data_path = Path(__file__).parent / \"obd\" / self.behavior_policy / self.campaign\nself.raw_data_file = f\"{self.campaign}.csv\"\nself.load_raw_data()\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add option to download example obd
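A minimal sketch of the resulting default behaviour (assuming the small example data ships inside the package as intended by the record above, so no data_path needs to be supplied):

from obp.dataset import OpenBanditDataset

# with no data_path given, the bundled example data is used
dataset = OpenBanditDataset(behavior_policy="random", campaign="all")
bandit_feedback = dataset.obtain_batch_bandit_feedback()
print(bandit_feedback["n_rounds"], dataset.n_actions)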
641,006
06.02.2021 16:45:05
-32,400
15ffa9b70b57b9e7e23c02f3974045beab240943
fix default value of position argument
[ { "change_type": "MODIFY", "old_path": "obp/ope/estimators.py", "new_path": "obp/ope/estimators.py", "diff": "@@ -67,8 +67,8 @@ class ReplayMethod(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\naction_dist: np.ndarray,\n+ position: np.ndarray,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -81,6 +81,9 @@ class ReplayMethod(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (must be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\nposition: array-like, shape (n_rounds,)\nPositions of each round in the given logged bandit feedback.\n@@ -102,8 +105,8 @@ class ReplayMethod(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\naction_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> float:\n\"\"\"Estimate policy value of an evaluation policy.\n@@ -116,7 +119,10 @@ class ReplayMethod(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (must be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\nReturns\n@@ -125,6 +131,8 @@ class ReplayMethod(BaseOffPolicyEstimator):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nreturn self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -136,8 +144,8 @@ class ReplayMethod(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\naction_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n@@ -153,7 +161,10 @@ class ReplayMethod(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (must be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n@@ -171,6 +182,8 @@ class ReplayMethod(BaseOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nestimated_round_rewards = self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -247,6 +260,9 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be 
deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\n@@ -260,9 +276,9 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate policy value of an evaluation policy.\n@@ -275,21 +291,23 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nV_hat: float\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nreturn self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -302,9 +320,9 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n@@ -320,9 +338,6 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -330,6 +345,9 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nAction choice probabilities\nby the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nalpha: float, default=0.05\nP-value.\n@@ -345,6 +363,8 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nestimated_round_rewards = self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -520,30 +540,32 @@ class DirectMethod(BaseOffPolicyEstimator):\ndef estimate_policy_value(\nself,\n- position: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> float:\n\"\"\"Estimate policy value of an evaluation policy.\nParameters\n----------\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can 
be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nV_hat: float\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nreturn self._estimate_round_rewards(\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n@@ -552,9 +574,9 @@ class DirectMethod(BaseOffPolicyEstimator):\ndef estimate_interval(\nself,\n- position: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n@@ -564,15 +586,15 @@ class DirectMethod(BaseOffPolicyEstimator):\nParameters\n----------\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nalpha: float, default=0.05\nP-value.\n@@ -588,6 +610,8 @@ class DirectMethod(BaseOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nestimated_round_rewards = self._estimate_round_rewards(\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n@@ -708,10 +732,10 @@ class DoublyRobust(InverseProbabilityWeighting):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n) -> float:\n\"\"\"Estimate policy value of an evaluation policy.\n@@ -723,9 +747,6 @@ class DoublyRobust(InverseProbabilityWeighting):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -735,12 +756,17 @@ class DoublyRobust(InverseProbabilityWeighting):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nV_hat: float\nEstimated policy value by the DR estimator.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nreturn 
self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -754,10 +780,10 @@ class DoublyRobust(InverseProbabilityWeighting):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n@@ -773,9 +799,6 @@ class DoublyRobust(InverseProbabilityWeighting):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -785,6 +808,9 @@ class DoublyRobust(InverseProbabilityWeighting):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nalpha: float, default=0.05\nP-value.\n@@ -800,6 +826,8 @@ class DoublyRobust(InverseProbabilityWeighting):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nestimated_round_rewards = self._estimate_round_rewards(\nreward=reward,\naction=action,\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix default value of position argument
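A minimal sketch of the new default introduced above (synthetic arrays sized only for shape consistency; with len_list = 1 the position argument can now be omitted):

import numpy as np
from obp.ope import InverseProbabilityWeighting

n_rounds, n_actions = 5, 3
reward = np.random.binomial(1, 0.5, size=n_rounds)
action = np.random.randint(n_actions, size=n_rounds)
pscore = np.full(n_rounds, 1.0 / n_actions)  # uniform-random behavior policy
action_dist = np.full((n_rounds, n_actions, 1), 1.0 / n_actions)

ipw = InverseProbabilityWeighting()
# position=None is filled with zeros internally
value = ipw.estimate_policy_value(
    reward=reward, action=action, pscore=pscore, action_dist=action_dist
)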
641,003
06.02.2021 21:29:52
-32,400
f9ec739bbc66da6f2a7a6bb1e781f53b70c53450
refactor base dataset class
[ { "change_type": "MODIFY", "old_path": "obp/dataset/base.py", "new_path": "obp/dataset/base.py", "diff": "from abc import ABCMeta, abstractmethod\n-class BaseRealBanditDataset(metaclass=ABCMeta):\n+class BaseBanditDataset(metaclass=ABCMeta):\n+ \"\"\"Base Class for Synthetic Bandit Dataset.\"\"\"\n+\n+ @abstractmethod\n+ def obtain_batch_bandit_feedback(self) -> None:\n+ \"\"\"Obtain batch logged bandit feedback.\"\"\"\n+ raise NotImplementedError\n+\n+\n+class BaseRealBanditDataset(BaseBanditDataset):\n\"\"\"Base Class for Real-World Bandit Dataset.\"\"\"\n@abstractmethod\n@@ -17,17 +26,3 @@ class BaseRealBanditDataset(metaclass=ABCMeta):\ndef pre_process(self) -> None:\n\"\"\"Preprocess raw dataset.\"\"\"\nraise NotImplementedError\n-\n- @abstractmethod\n- def obtain_batch_bandit_feedback(self) -> None:\n- \"\"\"Obtain batch logged bandit feedback.\"\"\"\n- raise NotImplementedError\n-\n-\n-class BaseSyntheticBanditDataset(metaclass=ABCMeta):\n- \"\"\"Base Class for Synthetic Bandit Dataset.\"\"\"\n-\n- @abstractmethod\n- def obtain_batch_bandit_feedback(self) -> None:\n- \"\"\"Obtain batch logged bandit feedback.\"\"\"\n- raise NotImplementedError\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/multiclass.py", "new_path": "obp/dataset/multiclass.py", "diff": "@@ -11,12 +11,12 @@ from sklearn.base import ClassifierMixin, is_classifier, clone\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import check_random_state, check_X_y\n-from .base import BaseSyntheticBanditDataset\n+from .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n@dataclass\n-class MultiClassToBanditReduction(BaseSyntheticBanditDataset):\n+class MultiClassToBanditReduction(BaseBanditDataset):\n\"\"\"Class for handling multi-class classification data as logged bandit feedback data.\nNote\n" }, { "change_type": "MODIFY", "old_path": "obp/dataset/synthetic.py", "new_path": "obp/dataset/synthetic.py", "diff": "@@ -9,13 +9,13 @@ import numpy as np\nfrom scipy.stats import truncnorm\nfrom sklearn.utils import check_random_state\n-from .base import BaseSyntheticBanditDataset\n+from .base import BaseBanditDataset\nfrom ..types import BanditFeedback\nfrom ..utils import sigmoid, softmax\n@dataclass\n-class SyntheticBanditDataset(BaseSyntheticBanditDataset):\n+class SyntheticBanditDataset(BaseBanditDataset):\n\"\"\"Class for generating synthetic bandit dataset.\nNote\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
refactor base dataset class
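A toy sketch of what the refactor above enables (the class name and returned keys below are illustrative; only obtain_batch_bandit_feedback is required by the new base class):

from dataclasses import dataclass
import numpy as np
from obp.dataset.base import BaseBanditDataset


@dataclass
class UniformRandomFeedback(BaseBanditDataset):
    n_actions: int = 3

    def obtain_batch_bandit_feedback(self, n_rounds: int = 100) -> dict:
        # synthetic logged feedback from a uniform-random behavior policy
        return {
            "n_rounds": n_rounds,
            "n_actions": self.n_actions,
            "context": np.random.normal(size=(n_rounds, 5)),
            "action": np.random.randint(self.n_actions, size=n_rounds),
            "reward": np.random.binomial(1, 0.5, size=n_rounds),
            "position": np.zeros(n_rounds, dtype=int),
            "pscore": np.full(n_rounds, 1.0 / self.n_actions),
        }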
641,006
06.02.2021 23:00:27
-32,400
992ba887ec5c3ad3caef4305164e68f9297c4f65
fix input validation of estimators and utils
[ { "change_type": "MODIFY", "old_path": "obp/ope/estimators.py", "new_path": "obp/ope/estimators.py", "diff": "@@ -8,7 +8,7 @@ from typing import Dict, Optional\nimport numpy as np\n-from ..utils import estimate_confidence_interval_by_bootstrap\n+from ..utils import estimate_confidence_interval_by_bootstrap, check_ope_inputs\n@dataclass\n@@ -131,8 +131,17 @@ class ReplayMethod(BaseOffPolicyEstimator):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist, position=position, action=action, reward=reward\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nreturn self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -182,8 +191,17 @@ class ReplayMethod(BaseOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist, position=position, action=action, reward=reward\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nestimated_round_rewards = self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -306,8 +324,23 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ position=position,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nreturn self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -363,8 +396,23 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ position=position,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nestimated_round_rewards = self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -564,8 +612,17 @@ class DirectMethod(BaseOffPolicyEstimator):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n+ raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nreturn 
self._estimate_round_rewards(\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n@@ -610,8 +667,17 @@ class DirectMethod(BaseOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n+ raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nestimated_round_rewards = self._estimate_round_rewards(\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n@@ -765,8 +831,26 @@ class DoublyRobust(InverseProbabilityWeighting):\nEstimated policy value by the DR estimator.\n\"\"\"\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n+ raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ position=position,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nreturn self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -826,8 +910,26 @@ class DoublyRobust(InverseProbabilityWeighting):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n+ raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ position=position,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n+\nestimated_round_rewards = self._estimate_round_rewards(\nreward=reward,\naction=action,\n@@ -965,7 +1067,7 @@ class SwitchInverseProbabilityWeighting(DoublyRobust):\n----------\ntau: float, default=1\nSwitching hyperparameter. 
When importance weight is larger than this parameter, the DM estimator is applied, otherwise the IPW estimator is applied.\n- This hyperparameter should be larger than 1., otherwise it is meaningless.\n+ This hyperparameter should be larger than or equal to 0, otherwise it is meaningless.\nestimator_name: str, default='switch-ipw'.\nName of off-policy estimator.\n@@ -980,14 +1082,19 @@ class SwitchInverseProbabilityWeighting(DoublyRobust):\n\"\"\"\n- tau: float = 1\n+ tau: float = 1.0\nestimator_name: str = \"switch-ipw\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert (\n- self.tau >= 0.0\n- ), f\"switching hyperparameter should be larger than 1, but {self.tau} is given\"\n+ if not isinstance(self.tau, float):\n+ raise ValueError(\n+ f\"switching hyperparameter must be float, but {self.tau} is given\"\n+ )\n+ if self.tau < 0.0:\n+ raise ValueError(\n+ f\"switching hyperparameter must be larger than or equal to zero, but {self.tau} is given\"\n+ )\ndef _estimate_round_rewards(\nself,\n@@ -1084,14 +1191,19 @@ class SwitchDoublyRobust(DoublyRobust):\n\"\"\"\n- tau: float = 1\n+ tau: float = 1.0\nestimator_name: str = \"switch-dr\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert (\n- self.tau >= 0.0\n- ), f\"switching hyperparameter must be larger than or equal to zero, but {self.tau} is given\"\n+ if not isinstance(self.tau, float):\n+ raise ValueError(\n+ f\"switching hyperparameter must be float, but {self.tau} is given\"\n+ )\n+ if self.tau < 0.0:\n+ raise ValueError(\n+ f\"switching hyperparameter must be larger than or equal to zero, but {self.tau} is given\"\n+ )\ndef _estimate_round_rewards(\nself,\n@@ -1207,9 +1319,14 @@ class DoublyRobustWithShrinkage(DoublyRobust):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert (\n- self.lambda_ >= 0.0\n- ), f\"shrinkage hyperparameter must be larger than or equal to zero, but {self.lambda_} is given\"\n+ if not isinstance(self.lambda_, float):\n+ raise ValueError(\n+ f\"shrinkage hyperparameter must be float, but {self.lambda_} is given\"\n+ )\n+ if self.lambda_ < 0.0:\n+ raise ValueError(\n+ f\"shrinkage hyperparameter must be larger than or equal to zero, but {self.lambda_} is given\"\n+ )\ndef _estimate_round_rewards(\nself,\n" }, { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -15,7 +15,7 @@ def check_confidence_interval_arguments(\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n-) -> None:\n+) -> Optional[ValueError]:\n\"\"\"Check confidence interval arguments.\nParameters\n@@ -205,7 +205,7 @@ def check_bandit_feedback_inputs(\nposition: Optional[np.ndarray] = None,\npscore: Optional[np.ndarray] = None,\naction_context: Optional[np.ndarray] = None,\n-) -> Optional[AssertionError]:\n+) -> Optional[ValueError]:\n\"\"\"Check inputs for bandit learning or simulation.\nParameters\n@@ -230,35 +230,156 @@ def check_bandit_feedback_inputs(\nContext vectors characterizing each action.\n\"\"\"\n- assert isinstance(context, np.ndarray), \"context must be ndarray\"\n- assert context.ndim == 2, \"context must be 2-dimensional\"\n- assert isinstance(action, np.ndarray), \"action must be ndarray\"\n- assert action.ndim == 1, \"action must be 1-dimensional\"\n- assert isinstance(reward, np.ndarray), \"reward must be ndarray\"\n- assert reward.ndim == 1, \"reward must be 1-dimensional\"\n+ if not isinstance(context, np.ndarray):\n+ raise ValueError(\"context must be ndarray\")\n+ if 
context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional\")\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional\")\nif pscore is not None:\n- assert isinstance(pscore, np.ndarray), \"pscore must be ndarray\"\n- assert pscore.ndim == 1, \"pscore must be 1-dimensional\"\n- assert (\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+ if pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional\")\n+ if not (\ncontext.shape[0] == action.shape[0] == reward.shape[0] == pscore.shape[0]\n- ), \"context, action, reward, and pscore must be the same size.\"\n+ ):\n+ raise ValueError(\n+ \"context, action, reward, and pscore must be the same size.\"\n+ )\nif position is not None:\n- assert isinstance(position, np.ndarray), \"position must be ndarray\"\n- assert position.ndim == 1, \"position must be 1-dimensional\"\n- assert (\n+ if not isinstance(position, np.ndarray):\n+ raise ValueError(\"position must be ndarray\")\n+ if position.ndim != 1:\n+ raise ValueError(\"position must be 1-dimensional\")\n+ if not (\ncontext.shape[0] == action.shape[0] == reward.shape[0] == position.shape[0]\n- ), \"context, action, reward, and position must be the same size.\"\n+ ):\n+ raise ValueError(\n+ \"context, action, reward, and position must be the same size.\"\n+ )\nelse:\n- assert (\n- context.shape[0] == action.shape[0] == reward.shape[0]\n- ), \"context, action, and reward must be the same size.\"\n+ if not (context.shape[0] == action.shape[0] == reward.shape[0]):\n+ raise ValueError(\"context, action, and reward must be the same size.\")\nif action_context is not None:\n- assert isinstance(action_context, np.ndarray), \"action_context must be ndarray\"\n- assert action_context.ndim == 2, \"action_context must be 2-dimensional\"\n- assert (action.max() + 1) == action_context.shape[\n- 0\n- ], \"the number of action and the size of the first dimension of action_context must be same.\"\n+ if not isinstance(action_context, np.ndarray):\n+ raise ValueError(\"action_context must be ndarray\")\n+ if action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional\")\n+ if (action.max() + 1) != action_context.shape[0]:\n+ raise ValueError(\n+ \"the number of action and the size of the first dimension of action_context must be same.\"\n+ )\n+\n+\n+def check_ope_inputs(\n+ action_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ action: Optional[np.ndarray] = None,\n+ reward: Optional[np.ndarray] = None,\n+ pscore: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+) -> Optional[AssertionError]:\n+ \"\"\"Check inputs for bandit learning or simulation.\n+\n+ Parameters\n+ -----------\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\n+ action: array-like, shape (n_rounds,), default=None\n+ Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,), default=None\n+ 
Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Propensity scores, the probability of selecting each action by behavior policy,\n+ in the given logged bandit feedback.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Expected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ \"\"\"\n+ # action_dist\n+ if not isinstance(action_dist, np.ndarray):\n+ raise ValueError(\"action_dist must be ndarray\")\n+ if action_dist.ndim != 3:\n+ raise ValueError(\n+ f\"action_dist.ndim must be 3-dimensional, but is {action_dist.ndim}\"\n+ )\n+ if not np.allclose(action_dist.sum(axis=1), 1):\n+ raise ValueError(\"action_dist must be a probability distribution\")\n+\n+ # position\n+ if position is not None:\n+ if not isinstance(position, np.ndarray):\n+ raise ValueError(\"position must be ndarray\")\n+ if position.ndim != 1:\n+ raise ValueError(\"position must be 1-dimensional\")\n+ if not (position.shape[0] == action_dist.shape[0]):\n+ raise ValueError(\n+ \"the first dimension of position and the first dimension of action_dist must be the same\"\n+ )\n+ if not (position.dtype == int and position.min() >= 0):\n+ raise ValueError(\"position elements must be non-negative integers\")\n+ if position.max() >= action_dist.shape[2]:\n+ raise ValueError(\n+ \"position elements must be smaller than the third dimension of action_dist\"\n+ )\n+ elif action_dist.shape[2] > 1:\n+ raise ValueError(\n+ \"position elements must be given when the third dimension of action_dist is greator than 1\"\n+ )\n+\n+ # estimated_rewards_by_reg_model\n+ if (\n+ estimated_rewards_by_reg_model is not None\n+ and estimated_rewards_by_reg_model.shape != action_dist.shape\n+ ):\n+ raise ValueError(\n+ \"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\"\n+ )\n+\n+ # action, reward\n+ if action is not None or reward is not None:\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional\")\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional\")\n+ if not (action.shape[0] == reward.shape[0]):\n+ raise ValueError(\"action and reward must be the same size.\")\n+ if not (action.dtype == int and action.min() >= 0):\n+ raise ValueError(\"action elements must be non-negative integers\")\n+ if action.max() >= action_dist.shape[1]:\n+ raise ValueError(\n+ \"action elements must be smaller than the second dimension of action_dist\"\n+ )\n+\n+ # pscpre\n+ if pscore is not None:\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+ if pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional\")\n+ if not (action.shape[0] == reward.shape[0] == pscore.shape[0]):\n+ raise ValueError(\"action, reward, and pscore must be the same size.\")\n+ if np.any(pscore <= 0):\n+ raise ValueError(\"pscore must be positive\")\ndef sigmoid(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/conftest.py", "new_path": "tests/ope/conftest.py", "diff": "@@ -99,3 +99,9 @@ def random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\nn_rounds=synthetic_bandit_feedback[\"n_rounds\"]\n)\nreturn action_dist\n+\n+\n+def 
generate_action_dist(i, j, k):\n+ x = np.random.uniform(size=(i, j, k))\n+ action_dist = x / x.sum(axis=1)[:, np.newaxis, :]\n+ return action_dist\n" }, { "change_type": "MODIFY", "old_path": "tests/policy/test_offline.py", "new_path": "tests/policy/test_offline.py", "diff": "@@ -78,7 +78,7 @@ def test_opl_fit():\nlearner.fit(context=context, action=action, reward=reward, position=position)\n# inconsistency with the shape\n- with pytest.raises(AssertionError):\n+ with pytest.raises(ValueError):\nlearner = IPWLearner(n_actions=2, len_list=2)\nvariant_context = np.array([1.0, 1.0, 1.0, 1.0])\nlearner.fit(\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix input validation of estimators and utils
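A minimal sketch of the new helper used directly (the all-ones array is intentionally not a probability distribution over actions, so the check should fail):

import numpy as np
from obp.utils import check_ope_inputs

try:
    check_ope_inputs(action_dist=np.ones((5, 4, 1)))
except ValueError as e:
    print(e)  # action_dist must be a probability distribution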
641,006
06.02.2021 23:01:13
-32,400
1db3b59e5f6c97952c3270f613778fd95d7f976c
add validation test in test_all_estimators
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_all_estimators.py", "new_path": "tests/ope/test_all_estimators.py", "diff": "from typing import Set\nimport numpy as np\n+import pytest\nfrom obp import ope\nfrom obp.ope import OffPolicyEvaluation\nfrom obp.types import BanditFeedback\n+from conftest import generate_action_dist\n+\n+\n+# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\n+invalid_input_of_estimation = [\n+ (\n+ None,\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ None,\n+ np.zeros((5, 4, 3)),\n+ \"action_dist must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 1)[:, :, 0],\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ None,\n+ np.zeros((5, 4, 1)),\n+ \"action_dist.ndim must be 3-dimensional\",\n+ ),\n+ (\n+ np.ones((5, 4, 3)),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ None,\n+ np.zeros((5, 4, 3)),\n+ \"action_dist must be a probability distribution\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ \"4\",\n+ np.zeros((5, 4, 3)),\n+ \"position must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.zeros((5, 4), dtype=int),\n+ np.zeros((5, 4, 3)),\n+ \"position must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.zeros(5),\n+ np.zeros((5, 4, 3)),\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.zeros(5) - 1,\n+ np.zeros((5, 4, 3)),\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.zeros(4, dtype=int),\n+ np.zeros((5, 4, 3)),\n+ \"the first dimension of position and the first dimension of action_dist must be the same.\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.ones(5, dtype=int) * 8,\n+ np.zeros((5, 4, 3)),\n+ \"position elements must be smaller than the third dimension of action_dist\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ None,\n+ np.zeros((5, 4, 3)),\n+ \"position elements must be given when the third dimension of action_dist is greator than 1\",\n+ ),\n+]\n+\n+valid_input_of_estimation = [\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"all argumnents are given and len_list > 1\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 1),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.zeros(5, dtype=int),\n+ np.zeros((5, 4, 1)),\n+ \"all argumnents are given and len_list == 1\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 1),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ None,\n+ np.zeros((5, 4, 1)),\n+ \"position argumnent is None\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\",\n+ invalid_input_of_estimation,\n+)\n+def test_estimation_of_all_estimators_using_invalid_input_data(\n+ action_dist: np.ndarray,\n+ action: np.ndarray,\n+ reward: 
np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ description: str,\n+) -> None:\n+ all_estimators = ope.__all_estimators__\n+ estimators = [\n+ getattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n+ ]\n+ # estimate_intervals function raises ValueError of all estimators\n+ for estimator in estimators:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ est = estimator.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ assert est == 0.0, f\"policy value must be 0, but {est}\"\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = estimator.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+\[email protected](\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\",\n+ valid_input_of_estimation,\n+)\n+def test_estimation_of_all_estimators_using_valid_input_data(\n+ action_dist: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ description: str,\n+) -> None:\n+ all_estimators = ope.__all_estimators__\n+ estimators = [\n+ getattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n+ ]\n+ # estimate_intervals function raises ValueError of all estimators\n+ for estimator in estimators:\n+ _ = estimator.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = estimator.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+\n+# alpha, n_bootstrap_samples, random_state, description\n+invalid_input_of_estimate_intervals = [\n+ (0.05, 100, \"s\", \"random_state must be an integer\"),\n+ (0.05, -1, 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.05, \"s\", 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (1.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (\"0\", 1, 1, \"alpha must be a positive float (< 1)\"),\n+]\n+\n+valid_input_of_estimate_intervals = [\n+ (0.05, 100, 1, \"random_state is 1\"),\n+ (0.05, 1, 1, \"n_bootstrap_samples is 1\"),\n+]\n+\n+\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description\",\n+ invalid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+ random_action_dist: np.ndarray,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_intervals using invalid data\n+ \"\"\"\n+ bandit_feedback = synthetic_bandit_feedback\n+ action_dist = random_action_dist\n+ expected_reward = np.expand_dims(\n+ synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n+ )\n+ # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ 
all_estimators = ope.__all_estimators__\n+ estimators = [\n+ getattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n+ ]\n+ # estimate_intervals function raises ValueError of all estimators\n+ for estimator in estimators:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = estimator.estimate_interval(\n+ reward=bandit_feedback[\"reward\"],\n+ action=bandit_feedback[\"action\"],\n+ position=bandit_feedback[\"position\"],\n+ pscore=bandit_feedback[\"pscore\"],\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_valid_input_data(\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description: str,\n+ synthetic_bandit_feedback: BanditFeedback,\n+ random_action_dist: np.ndarray,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_intervals using valid data\n+ \"\"\"\n+ bandit_feedback = synthetic_bandit_feedback\n+ action_dist = random_action_dist\n+ expected_reward = np.expand_dims(\n+ synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n+ )\n+ # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ all_estimators = ope.__all_estimators__\n+ estimators = [\n+ getattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n+ ]\n+ # estimate_intervals function raises ValueError of all estimators\n+ for estimator in estimators:\n+ _ = estimator.estimate_interval(\n+ reward=bandit_feedback[\"reward\"],\n+ action=bandit_feedback[\"action\"],\n+ position=bandit_feedback[\"position\"],\n+ pscore=bandit_feedback[\"pscore\"],\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\ndef test_fixture(\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add validation test in test_all_estimators
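The validation tests added in this commit follow a single pattern: enumerate invalid inputs together with the error message each one should trigger, then feed them through pytest.mark.parametrize and assert that a ValueError matching that message is raised. A minimal, self-contained sketch of the pattern (the check_action_dist helper and its wiring below are illustrative stand-ins, not obp's actual API, although the messages mirror the ones used in the diff):

```python
import numpy as np
import pytest


def check_action_dist(action_dist) -> None:
    """Raise ValueError when the evaluation policy's action distribution is malformed."""
    if not isinstance(action_dist, np.ndarray):
        raise ValueError("action_dist must be ndarray")
    if action_dist.ndim != 3:
        raise ValueError("action_dist.ndim must be 3-dimensional")
    if not np.allclose(action_dist.sum(axis=1), 1):
        raise ValueError("action_dist must be a probability distribution")


# (invalid input, expected error message) pairs, in the same style as the diff above
invalid_inputs = [
    (None, "action_dist must be ndarray"),
    (np.ones((5, 4)), "action_dist.ndim must be 3-dimensional"),
    (np.ones((5, 4, 3)), "action_dist must be a probability distribution"),
]


@pytest.mark.parametrize("action_dist, description", invalid_inputs)
def test_invalid_action_dist(action_dist, description):
    # each malformed input must raise ValueError with the documented message
    with pytest.raises(ValueError, match=f"{description}*"):
        check_action_dist(action_dist)
```

Keeping the expected message next to each input makes the parametrized cases double as documentation of the validation contract.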
641,006
06.02.2021 23:01:29
-32,400
0fee516a57e4063e1d5f4a5358d01984db6263b1
add validation test in test_dm_estimators
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_dm_estimators.py", "new_path": "tests/ope/test_dm_estimators.py", "diff": "@@ -5,6 +5,55 @@ import numpy as np\nfrom obp.types import BanditFeedback\nfrom obp.ope import DirectMethod\n+from conftest import generate_action_dist\n+\n+\n+# action_dist, position, estimated_rewards_by_reg_model, description\n+invalid_input_of_dm = [\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros((5, 4, 2)),\n+ \"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ None,\n+ \"estimated_rewards_by_reg_model must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ \"4\",\n+ \"estimated_rewards_by_reg_model must be ndarray\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_dist, position, estimated_rewards_by_reg_model, description\",\n+ invalid_input_of_dm,\n+)\n+def test_dm_using_invalid_input_data(\n+ action_dist: np.ndarray,\n+ position: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ description: str,\n+) -> None:\n+ dm = DirectMethod()\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dm.estimate_policy_value(\n+ action_dist=action_dist,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dm.estimate_interval(\n+ action_dist=action_dist,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\ndef test_dm_using_random_evaluation_policy(\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add validation test in test_dm_estimators
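These tests build their valid action distributions with generate_action_dist, a small helper imported from conftest; its definition appears in the conftest.py diff of the "fix regression model testing" commit further below. A sketch of what it produces, with illustrative shapes and parameter names renamed for readability:

```python
import numpy as np


def generate_action_dist(n_rounds: int, n_actions: int, len_list: int) -> np.ndarray:
    """Random positive values, normalized over the action axis."""
    x = np.random.uniform(size=(n_rounds, n_actions, len_list))
    return x / x.sum(axis=1)[:, np.newaxis, :]


action_dist = generate_action_dist(5, 4, 3)
assert action_dist.shape == (5, 4, 3)
# every (round, position) slice is a proper probability distribution over actions
assert np.allclose(action_dist.sum(axis=1), 1.0)
```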
641,006
06.02.2021 23:01:37
-32,400
9295fbeeec5b2955b967bda8aa4734501f5bb4d6
add validation test in test_ipw_estimators
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_ipw_estimators.py", "new_path": "tests/ope/test_ipw_estimators.py", "diff": "@@ -8,13 +8,184 @@ from obp.ope import (\nInverseProbabilityWeighting,\nSelfNormalizedInverseProbabilityWeighting,\n)\n-\n+from conftest import generate_action_dist\n# prepare ipw instances\nipw = InverseProbabilityWeighting()\nsnipw = SelfNormalizedInverseProbabilityWeighting()\n+# action_dist, action, reward, pscore, position, description\n+invalid_input_of_ipw = [\n+ (\n+ generate_action_dist(5, 4, 3),\n+ None,\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ None,\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ None,\n+ np.random.choice([0, 1, 2], size=5),\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=float),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int) - 1,\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ \"4\",\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros((3, 2), dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int) + 8,\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action elements must be smaller than the second dimension of action_dist\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ \"4\",\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros((3, 2), dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"reward must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(4, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action and reward must be the same size.\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ \"4\",\n+ np.random.choice([0, 1, 2], size=5),\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones((5, 3)),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"pscore must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(4),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"action, reward, and pscore must be the same size.\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.arange(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"pscore must be positive\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_dist, action, reward, pscore, position, description\",\n+ invalid_input_of_ipw,\n+)\n+def test_ipw_using_invalid_input_data(\n+ 
action_dist: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ description: str,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ipw.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ipw.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = snipw.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = snipw.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ )\n+\n+\ndef test_ipw_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add validation test in test_ipw_estimators
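The "pscore must be positive" case is the one with an arithmetic reason behind it: inverse probability weighting divides by the behavior policy's propensity score, so a zero or negative entry makes the importance weight undefined rather than merely unusual. A rough sketch of the estimate these checks protect (illustrative only, not obp's InverseProbabilityWeighting implementation):

```python
import numpy as np


def ipw_estimate(reward, action, pscore, action_dist, position):
    # importance weight = pi_e(a_i | x_i, position_i) / pscore_i
    n = reward.shape[0]
    iw = action_dist[np.arange(n), action, position] / pscore
    return float(np.mean(iw * reward))


n = 5
reward = np.random.binomial(1, 0.5, size=n)
action = np.random.randint(4, size=n)
position = np.zeros(n, dtype=int)
pscore = np.full(n, 0.25)  # uniform behavior policy over 4 actions
x = np.random.uniform(size=(n, 4, 1))
action_dist = x / x.sum(axis=1)[:, np.newaxis, :]
print(ipw_estimate(reward, action, pscore, action_dist, position))
```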
641,006
06.02.2021 23:01:43
-32,400
05ff8058d1212453b7510a547e2b5b9230abf2e8
add validation test in test_dr_estimators
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_dr_estimators.py", "new_path": "tests/ope/test_dr_estimators.py", "diff": "@@ -13,23 +13,307 @@ from obp.ope import (\nSwitchDoublyRobust,\nSelfNormalizedDoublyRobust,\n)\n-\n+from conftest import generate_action_dist\n# prepare instances\nipw = InverseProbabilityWeighting()\ndm = DirectMethod()\ndr = DoublyRobust()\n-dr_shrink_0 = DoublyRobustWithShrinkage(lambda_=0)\n+dr_shrink_0 = DoublyRobustWithShrinkage(lambda_=0.0)\ndr_shrink_max = DoublyRobustWithShrinkage(lambda_=1e10)\nsndr = SelfNormalizedDoublyRobust()\n-switch_ipw_0 = SwitchInverseProbabilityWeighting(tau=0)\n+switch_ipw_0 = SwitchInverseProbabilityWeighting(tau=0.0)\nswitch_ipw_max = SwitchInverseProbabilityWeighting(tau=1e10)\n-switch_dr_0 = SwitchDoublyRobust(tau=0)\n+switch_dr_0 = SwitchDoublyRobust(tau=0.0)\nswitch_dr_max = SwitchDoublyRobust(tau=1e10)\ndr_estimators = [dr, dr_shrink_0, sndr, switch_ipw_0, switch_dr_0]\n+# dr and self-normalized dr\n+# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\n+invalid_input_of_dr = [\n+ (\n+ generate_action_dist(5, 4, 3),\n+ None,\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ None,\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ None,\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ None,\n+ \"estimated_rewards_by_reg_model must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=float),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int) - 1,\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ \"4\",\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros((3, 2), dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int) + 8,\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action elements must be smaller than the second dimension of action_dist\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ \"4\",\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros((3, 2), dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"reward must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(4, 
dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action and reward must be the same size.\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ \"4\",\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones((5, 3)),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"pscore must be 1-dimensional\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(4),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"action, reward, and pscore must be the same size.\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.arange(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ \"pscore must be positive\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 2)),\n+ \"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.ones(5),\n+ np.random.choice([0, 1, 2], size=5),\n+ \"4\",\n+ \"estimated_rewards_by_reg_model must be ndarray\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\",\n+ invalid_input_of_dr,\n+)\n+def test_dr_using_invalid_input_data(\n+ action_dist: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ description: str,\n+) -> None:\n+ # estimate_intervals function raises ValueError of all estimators\n+ for estimator in [dr, sndr]:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = estimator.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = estimator.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+\n+# switch-ipw and switch-dr\n+\n+invalid_input_of_switch = [\n+ (\"a\", \"switching hyperparameter must be float\"),\n+ (-1.0, \"switching hyperparameter must be larger than or equal to zero\"),\n+]\n+\n+\[email protected](\n+ \"tau, description\",\n+ invalid_input_of_switch,\n+)\n+def test_switch_using_invalid_input_data(tau: float, description: str) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = SwitchDoublyRobust(tau=tau)\n+\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = SwitchInverseProbabilityWeighting(tau=tau)\n+\n+\n+# dr-os\n+invalid_input_of_shrinkage = [\n+ (\"a\", \"shrinkage hyperparameter must be float\"),\n+ (-1.0, \"shrinkage hyperparameter must be larger than or equal to zero\"),\n+]\n+\n+\[email protected](\n+ \"lambda_, description\",\n+ invalid_input_of_shrinkage,\n+)\n+def test_shrinkage_using_invalid_input_data(lambda_: float, description: str) -> None:\n+ with pytest.raises(ValueError, 
match=f\"{description}*\"):\n+ _ = DoublyRobustWithShrinkage(lambda_=lambda_)\n+\n+\n+# dr variants\n+valid_input_of_dr_variants = [\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.uniform(low=0.5, high=1.0, size=5),\n+ np.random.choice([0, 1, 2], size=5),\n+ np.zeros((5, 4, 3)),\n+ 0.5,\n+ \"all argumnents are given and len_list > 1\",\n+ )\n+]\n+\n+\[email protected](\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description\",\n+ valid_input_of_dr_variants,\n+)\n+def test_dr_variants_using_valid_input_data(\n+ action_dist: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ hyperparameter: float,\n+ description: str,\n+) -> None:\n+ # check dr variants\n+ switch_dr = SwitchDoublyRobust(tau=hyperparameter)\n+ switch_ipw = SwitchInverseProbabilityWeighting(tau=hyperparameter)\n+ dr_os = DoublyRobustWithShrinkage(lambda_=hyperparameter)\n+ for estimator in [switch_dr, switch_ipw, dr_os]:\n+ est = estimator.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ assert est == 0.0, f\"policy value must be 0, but {est}\"\n+\n+\ndef test_dr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add validation test in test_dr_estimators
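Two details in this commit are easy to miss. First, the module-level fixtures switch from tau=0 / lambda_=0 to tau=0.0 / lambda_=0.0, which suggests the new hyperparameter validation is a strict float isinstance check that would reject an integer literal. Second, the expected failures ("must be float", "must be larger than or equal to zero") are raised at construction time, i.e. in the estimator's initializer, not during estimation. A minimal sketch of that pattern under those assumptions (class and field names are illustrative, not obp's):

```python
from dataclasses import dataclass


@dataclass
class SwitchLikeEstimator:
    tau: float = 1.0

    def __post_init__(self) -> None:
        # strict type check: a float such as 0.0 or 1e10 is accepted
        if not isinstance(self.tau, float):
            raise ValueError("switching hyperparameter must be float")
        if self.tau < 0.0:
            raise ValueError(
                "switching hyperparameter must be larger than or equal to zero"
            )


SwitchLikeEstimator(tau=0.0)   # ok
SwitchLikeEstimator(tau=1e10)  # ok
try:
    SwitchLikeEstimator(tau=-1.0)
except ValueError as err:
    print(err)  # switching hyperparameter must be larger than or equal to zero
```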
641,006
06.02.2021 23:20:09
-32,400
944061da08960f6c9265504f3e41e7247f9710eb
fix variable names in test_meta
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_meta.py", "new_path": "tests/ope/test_meta.py", "diff": "@@ -567,7 +567,7 @@ def test_meta_summarize_off_policy_estimates(\nassert_frame_equal(interval, expected_interval), \"Invalid summarization (interval)\"\n-invalid_input_of_evaluation_performance = [\n+invalid_input_of_evaluation_performance_of_estimators = [\n(\"foo\", 0.3, \"metric must be either 'relative-ee' or 'se'\"),\n(\"se\", 1, \"ground_truth_policy_value must be a float\"),\n(\"se\", \"a\", \"ground_truth_policy_value must be a float\"),\n@@ -578,7 +578,7 @@ invalid_input_of_evaluation_performance = [\n),\n]\n-valid_input_of_evaluation_performance = [\n+valid_input_of_evaluation_performance_of_estimators = [\n(\"se\", 0.0, \"metric is se and ground_truth_policy_value is 0.0\"),\n(\"relative-ee\", 1.0, \"metric is relative-ee and ground_truth_policy_value is 1.0\"),\n]\n@@ -590,7 +590,7 @@ valid_input_of_evaluation_performance = [\n)\[email protected](\n\"metric, ground_truth_policy_value, description_2\",\n- invalid_input_of_evaluation_performance,\n+ invalid_input_of_evaluation_performance_of_estimators,\n)\ndef test_meta_evaluate_performance_of_estimators_using_invalid_input_data(\naction_dist,\n@@ -630,7 +630,7 @@ def test_meta_evaluate_performance_of_estimators_using_invalid_input_data(\n)\[email protected](\n\"metric, ground_truth_policy_value, description_2\",\n- valid_input_of_evaluation_performance,\n+ valid_input_of_evaluation_performance_of_estimators,\n)\ndef test_meta_evaluate_performance_of_estimators_using_valid_input_data(\naction_dist,\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix variable names in test_meta
641,006
06.02.2021 23:33:27
-32,400
bd7af34d2088b586713ab7ed0d4e2f06196b305a
assert -> ValueError; utils and post init
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -62,17 +62,19 @@ class RegressionModel(BaseEstimator):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- assert self.fitting_method in [\n- \"normal\",\n- \"iw\",\n- \"mrdr\",\n- ], f\"fitting_method must be one of 'normal', 'iw', or 'mrdr', but {self.fitting_method} is given\"\n- assert self.n_actions > 1 and isinstance(\n- self.n_actions, int\n- ), f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- assert self.len_list > 0 and isinstance(\n- self.len_list, int\n- ), f\"len_list must be a positive integer, but {self.len_list} is given\"\n+ if self.fitting_method not in [\"normal\", \"iw\", \"mrdr\"]:\n+ raise ValueError(\n+ f\"fitting_method must be one of 'normal', 'iw', or 'mrdr', but {self.fitting_method} is given\"\n+ )\n+ if not (isinstance(self.n_actions, int) and self.n_actions > 1):\n+ raise ValueError(\n+ f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ )\n+ if not (isinstance(self.len_list, int) and self.list > 0):\n+ raise ValueError(\n+ f\"len_list must be a positive integer, but {self.len_list} is given\"\n+ )\n+\nself.base_model_list = [\nclone(self.base_model) for _ in np.arange(self.len_list)\n]\n@@ -334,7 +336,10 @@ class RegressionModel(BaseEstimator):\nreturn estimated_rewards_by_reg_model\ndef _pre_process_for_reg_model(\n- self, context: np.ndarray, action: np.ndarray, action_context: np.ndarray,\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ action_context: np.ndarray,\n) -> np.ndarray:\n\"\"\"Preprocess feature vectors to train a give regression model.\n" }, { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -195,35 +195,53 @@ def check_bandit_feedback_inputs(\nContext vectors characterizing each action.\n\"\"\"\n- assert isinstance(context, np.ndarray), \"context must be ndarray\"\n- assert context.ndim == 2, \"context must be 2-dimensional\"\n- assert isinstance(action, np.ndarray), \"action must be ndarray\"\n- assert action.ndim == 1, \"action must be 1-dimensional\"\n- assert isinstance(reward, np.ndarray), \"reward must be ndarray\"\n- assert reward.ndim == 1, \"reward must be 1-dimensional\"\n+ if not isinstance(context, np.ndarray):\n+ raise ValueError(\"context must be ndarray\")\n+ if context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional\")\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional\")\nif pscore is not None:\n- assert isinstance(pscore, np.ndarray), \"pscore must be ndarray\"\n- assert pscore.ndim == 1, \"pscore must be 1-dimensional\"\n- assert (\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+ if pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional\")\n+ if not (\ncontext.shape[0] == action.shape[0] == reward.shape[0] == pscore.shape[0]\n- ), \"context, action, reward, and pscore must be the same size.\"\n+ ):\n+ raise ValueError(\n+ \"context, action, reward, and pscore must be the same size.\"\n+ )\nif position is not None:\n- assert isinstance(position, np.ndarray), \"position must be ndarray\"\n- assert position.ndim == 1, 
\"position must be 1-dimensional\"\n- assert (\n+ if not isinstance(position, np.ndarray):\n+ raise ValueError(\"position must be ndarray\")\n+ if position.ndim != 1:\n+ raise ValueError(\"position must be 1-dimensional\")\n+ if not (\ncontext.shape[0] == action.shape[0] == reward.shape[0] == position.shape[0]\n- ), \"context, action, reward, and position must be the same size.\"\n+ ):\n+ raise ValueError(\n+ \"context, action, reward, and position must be the same size.\"\n+ )\nelse:\n- assert (\n- context.shape[0] == action.shape[0] == reward.shape[0]\n- ), \"context, action, and reward must be the same size.\"\n+ if not (context.shape[0] == action.shape[0] == reward.shape[0]):\n+ raise ValueError(\"context, action, and reward must be the same size.\")\nif action_context is not None:\n- assert isinstance(action_context, np.ndarray), \"action_context must be ndarray\"\n- assert action_context.ndim == 2, \"action_context must be 2-dimensional\"\n- assert (action.max() + 1) == action_context.shape[\n- 0\n- ], \"the number of action and the size of the first dimension of action_context must be same.\"\n+ if not isinstance(action_context, np.ndarray):\n+ raise ValueError(\"action_context must be ndarray\")\n+ if action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional\")\n+ if (action.max() + 1) != action_context.shape[0]:\n+ raise ValueError(\n+ \"the number of action and the size of the first dimension of action_context must be same.\"\n+ )\ndef sigmoid(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
assert -> ValueError; utils and post init
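The motivation behind this commit is worth spelling out: assert statements are stripped when Python runs with -O, and they raise AssertionError rather than ValueError, so input validation that callers and the new tests rely on is better expressed as explicit raises. A before/after sketch of the conversion (function names are illustrative; the messages mirror obp/utils.py):

```python
import numpy as np


def check_context_old(context) -> None:
    # assert-based: silently skipped under `python -O`, raises AssertionError
    assert isinstance(context, np.ndarray), "context must be ndarray"
    assert context.ndim == 2, "context must be 2-dimensional"


def check_context_new(context) -> None:
    # explicit raises: always enforced, testable with pytest.raises(ValueError)
    if not isinstance(context, np.ndarray):
        raise ValueError("context must be ndarray")
    if context.ndim != 2:
        raise ValueError("context must be 2-dimensional")
```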
641,006
06.02.2021 23:52:46
-32,400
70bcbdde2331b24525f5c99edf9a2b9c144649e3
add more validations in regression models
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -131,18 +131,23 @@ class RegressionModel(BaseEstimator):\nif self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- assert (\n- isinstance(position, np.ndarray) and position.ndim == 1\n- ), f\"when len_list > 1, position must be a 1-dimensional ndarray\"\n+ if not (isinstance(position, np.ndarray) and position.ndim == 1):\n+ raise ValueError(\n+ \"when len_list > 1, position must be a 1-dimensional ndarray\"\n+ )\n+ if position.max() >= self.len_list:\n+ raise ValueError(\n+ f\"position elements must be smaller than len_list, but {position.max()}\"\n+ )\nif self.fitting_method in [\"iw\", \"mrdr\"]:\n- assert (\n- isinstance(action_dist, np.ndarray) and action_dist.ndim == 3\n- ), f\"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n- assert action_dist.shape == (\n- n_rounds,\n- self.n_actions,\n- self.len_list,\n- ), f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n+ if not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):\n+ raise ValueError(\n+ \"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n+ )\n+ if action_dist.shape != (n_rounds, self.n_actions, self.len_list):\n+ raise ValueError(\n+ f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n+ )\nif pscore is None:\npscore = np.ones_like(action) / self.n_actions\n@@ -281,24 +286,31 @@ class RegressionModel(BaseEstimator):\n)\nn_rounds = context.shape[0]\n- assert n_folds > 0 and isinstance(\n- n_folds, int\n- ), f\"n_folds must be a positive integer, but {n_folds} is given\"\n+ if not (isinstance(n_folds, int) and n_folds > 0):\n+ raise ValueError(\n+ f\"n_folds must be a positive integer, but {n_folds} is given\"\n+ )\n+\nif self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- assert (\n- isinstance(position, np.ndarray) and position.ndim == 1\n- ), f\"when len_list > 1, position must be a 1-dimensional ndarray\"\n+ if not (isinstance(position, np.ndarray) and position.ndim == 1):\n+ raise ValueError(\n+ \"when len_list > 1, position must be a 1-dimensional ndarray\"\n+ )\n+ if position.max() >= self.len_list:\n+ raise ValueError(\n+ f\"position elements must be smaller than len_list, but {position.max()}\"\n+ )\nif self.fitting_method in [\"iw\", \"mrdr\"]:\n- assert (\n- isinstance(action_dist, np.ndarray) and action_dist.ndim == 3\n- ), f\"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n- assert action_dist.shape == (\n- n_rounds,\n- self.n_actions,\n- self.len_list,\n- ), f\"shape of action_dist must be (n_rounds, n_actions, len_list)={n_rounds, self.n_actions, self.len_list}, but is {action_dist.shape}\"\n+ if not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):\n+ raise ValueError(\n+ \"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n+ )\n+ if action_dist.shape != (n_rounds, self.n_actions, self.len_list):\n+ raise ValueError(\n+ f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n+ )\nif pscore is None:\npscore = np.ones_like(action) / self.n_actions\n" }, { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -207,6 +207,8 @@ def 
check_bandit_feedback_inputs(\nraise ValueError(\"reward must be ndarray\")\nif reward.ndim != 1:\nraise ValueError(\"reward must be 1-dimensional\")\n+ if not (action.dtype == int and action.min() >= 0):\n+ raise ValueError(\"action elements must be non-negative integers\")\nif pscore is not None:\nif not isinstance(pscore, np.ndarray):\n@@ -219,6 +221,9 @@ def check_bandit_feedback_inputs(\nraise ValueError(\n\"context, action, reward, and pscore must be the same size.\"\n)\n+ if np.any(pscore <= 0):\n+ raise ValueError(\"pscore must be positive\")\n+\nif position is not None:\nif not isinstance(position, np.ndarray):\nraise ValueError(\"position must be ndarray\")\n@@ -230,6 +235,8 @@ def check_bandit_feedback_inputs(\nraise ValueError(\n\"context, action, reward, and position must be the same size.\"\n)\n+ if not (position.dtype == int and position.min() >= 0):\n+ raise ValueError(\"position elements must be non-negative integers\")\nelse:\nif not (context.shape[0] == action.shape[0] == reward.shape[0]):\nraise ValueError(\"context, action, and reward must be the same size.\")\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add more validations in regression models
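One of the new checks in obp/utils.py compares dtypes directly (`action.dtype == int and action.min() >= 0`), which is why the tests elsewhere construct actions with np.zeros(5, dtype=int) and treat a float array of whole numbers as invalid. A short illustration of that boundary (the helper below is not part of obp):

```python
import numpy as np


def looks_like_valid_action(action: np.ndarray) -> bool:
    # mirrors the dtype comparison used by the new validation
    return bool(action.dtype == int and action.min() >= 0)


action_int = np.zeros(5, dtype=int)   # default integer dtype, passes
action_float = np.zeros(5)            # float64, rejected despite whole-number values
print(looks_like_valid_action(action_int))    # True
print(looks_like_valid_action(action_float))  # False
```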
641,006
07.02.2021 18:18:49
-32,400
059b6558013c075555077419c7ba11f61f839095
fix validation of regression models
[ { "change_type": "MODIFY", "old_path": "obp/ope/regression_model.py", "new_path": "obp/ope/regression_model.py", "diff": "@@ -62,7 +62,10 @@ class RegressionModel(BaseEstimator):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if self.fitting_method not in [\"normal\", \"iw\", \"mrdr\"]:\n+ if not (\n+ isinstance(self.fitting_method, str)\n+ and self.fitting_method in [\"normal\", \"iw\", \"mrdr\"]\n+ ):\nraise ValueError(\nf\"fitting_method must be one of 'normal', 'iw', or 'mrdr', but {self.fitting_method} is given\"\n)\n@@ -70,10 +73,14 @@ class RegressionModel(BaseEstimator):\nraise ValueError(\nf\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n)\n- if not (isinstance(self.len_list, int) and self.list > 0):\n+ if not (isinstance(self.len_list, int) and self.len_list > 0):\nraise ValueError(\nf\"len_list must be a positive integer, but {self.len_list} is given\"\n)\n+ if not isinstance(self.base_model, BaseEstimator):\n+ raise ValueError(\n+ \"base_model must be BaseEstimator or a child class of BaseEstimator\"\n+ )\nself.base_model_list = [\nclone(self.base_model) for _ in np.arange(self.len_list)\n@@ -148,6 +155,8 @@ class RegressionModel(BaseEstimator):\nraise ValueError(\nf\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n)\n+ if not np.allclose(action_dist.sum(axis=1), 1):\n+ raise ValueError(\"action_dist must be a probability distribution\")\nif pscore is None:\npscore = np.ones_like(action) / self.n_actions\n@@ -158,6 +167,8 @@ class RegressionModel(BaseEstimator):\naction=action[idx],\naction_context=self.action_context,\n)\n+ if X.shape[0] == 0:\n+ raise ValueError(f\"No training data at position {position_}\")\n# train the base model according to the given `fitting method`\nif self.fitting_method == \"normal\":\nself.base_model_list[position_].fit(X, reward[idx])\n@@ -291,6 +302,11 @@ class RegressionModel(BaseEstimator):\nf\"n_folds must be a positive integer, but {n_folds} is given\"\n)\n+ if random_state is not None and not isinstance(random_state, int):\n+ raise ValueError(\n+ f\"random_state must be an integer, but {random_state} is given\"\n+ )\n+\nif self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n" }, { "change_type": "MODIFY", "old_path": "obp/utils.py", "new_path": "obp/utils.py", "diff": "@@ -245,9 +245,9 @@ def check_bandit_feedback_inputs(\nraise ValueError(\"action_context must be ndarray\")\nif action_context.ndim != 2:\nraise ValueError(\"action_context must be 2-dimensional\")\n- if (action.max() + 1) != action_context.shape[0]:\n+ if action.max() >= action_context.shape[0]:\nraise ValueError(\n- \"the number of action and the size of the first dimension of action_context must be same.\"\n+ \"action elements must be smaller than the size of the first dimension of action_context\"\n)\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix validation of regression models
641,006
07.02.2021 18:19:01
-32,400
7a94bd2dc0877e0e15e7357f0c22b52e32a7e649
fix regression model testing
[ { "change_type": "MODIFY", "old_path": "tests/ope/conftest.py", "new_path": "tests/ope/conftest.py", "diff": "@@ -99,3 +99,9 @@ def random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\nn_rounds=synthetic_bandit_feedback[\"n_rounds\"]\n)\nreturn action_dist\n+\n+\n+def generate_action_dist(i, j, k):\n+ x = np.random.uniform(size=(i, j, k))\n+ action_dist = x / x.sum(axis=1)[:, np.newaxis, :]\n+ return action_dist\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/hyperparams.yaml", "new_path": "tests/ope/hyperparams.yaml", "diff": "@@ -13,3 +13,5 @@ random_forest:\nmax_depth: 5\nmin_samples_leaf: 10\nrandom_state: 12345\n+ridge:\n+ alpha: 0.2\n" }, { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -5,13 +5,18 @@ import yaml\nimport numpy as np\nfrom sklearn.experimental import enable_hist_gradient_boosting # noqa\nfrom sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\n-from sklearn.linear_model import LogisticRegression\n+from sklearn.linear_model import LogisticRegression, Ridge\nfrom sklearn.metrics import roc_auc_score\n+from sklearn.base import BaseEstimator\n+import pytest\nfrom obp.ope import RegressionModel\nfrom obp.types import BanditFeedback\n+from conftest import generate_action_dist\n+np.random.seed(1)\n+\nbinary_model_dict = dict(\nlogistic_regression=LogisticRegression,\nlightgbm=HistGradientBoostingClassifier,\n@@ -24,6 +29,757 @@ with open(cd_path / \"hyperparams.yaml\", \"rb\") as f:\nhyperparams = yaml.safe_load(f)\n+# action_context, n_actions, len_list, fitting_method, base_model, description\n+n_rounds = 10\n+n_actions = 3\n+len_list = 3\n+\n+invalid_input_of_initializing_regression_models = [\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ \"a\",\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ \"n_actions must be an integer larger than 1\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ 1,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ \"n_actions must be an integer larger than 1\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ \"a\",\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ \"len_list must be a positive integer\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ 0,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ \"len_list must be a positive integer\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ 1,\n+ Ridge(**hyperparams[\"ridge\"]),\n+ \"fitting_method must be one of\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"awesome\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ \"fitting_method must be one of\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ \"RandomForest\",\n+ \"base_model must be BaseEstimator or a child class of BaseEstimator\",\n+ ),\n+]\n+\n+\n+# context, action, reward, pscore, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, description\n+\n+invalid_input_of_fitting_regression_models = [\n+ (\n+ None,\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"context must be ndarray\",\n+ ),\n+ (\n+ 
np.random.uniform(size=(n_rounds, 7)),\n+ None,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action must be ndarray\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ None,\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7, 3)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"context must be 2-dimensional\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=(n_rounds, 3)),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action must be 1-dimensional\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=(n_rounds, 3)),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"reward must be 1-dimensional\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice([\"1\", \"a\"], size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice([-1, -3], size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ \"3\",\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones((n_rounds, 2)) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"pscore must be 1-dimensional\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds - 1) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"context, action, reward, and pscore must be the 
same size.\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.arange(n_rounds),\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"pscore must be positive\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ \"3\",\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"position must be ndarray\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=(n_rounds, 3)),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"position must be 1-dimensional\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds - 1),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"context, action, reward, and position must be the same size.\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice([\"a\", \"1\"], size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice([-1, -3], size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds - 1),\n+ np.random.uniform(size=n_rounds),\n+ None,\n+ None,\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"context, action, and reward must be the same size\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(range(n_actions), size=n_rounds - 1),\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ None,\n+ None,\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"context, action, reward, and pscore must be the same size\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ \"3\",\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action_context must be ndarray\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8, 3)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ 
Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action_context must be 2-dimensional\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ (np.arange(n_rounds) % n_actions) + 1,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"action elements must be smaller than the size of the first dimension of action_context\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ None,\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"when len_list > 1, position must be a 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.ones(n_rounds, dtype=int) * len_list,\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"position elements must be smaller than len_list\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"iw\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"when fitting_method is either\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"mrdr\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 3,\n+ 1,\n+ \"when fitting_method is either\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"iw\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ np.zeros((n_rounds, n_actions, len_list - 1)),\n+ 3,\n+ 1,\n+ \"shape of action_dist must be (n_rounds, n_actions, len_list)\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"iw\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ np.zeros((n_rounds, n_actions, len_list)),\n+ 3,\n+ 1,\n+ \"action_dist must be a probability distribution\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 0,\n+ None,\n+ \"n_folds must be a positive integer\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, 
size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ \"a\",\n+ None,\n+ \"n_folds must be a positive integer\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ \"a\",\n+ \"random_state must be an integer\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.zeros(n_rounds, dtype=int),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ \"No training data at position\",\n+ ),\n+]\n+\n+\n+valid_input_of_regression_models = [\n+ (\n+ np.random.uniform(size=(n_rounds * 100, 7)),\n+ np.arange(n_rounds * 100) % n_actions,\n+ np.random.uniform(size=n_rounds * 100),\n+ np.ones(n_rounds * 100) * 2,\n+ np.random.choice(len_list, size=n_rounds * 100),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds * 100, n_actions, len_list),\n+ 3,\n+ 1,\n+ \"valid input with cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 1,\n+ 1,\n+ \"valid input without cross fitting\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_context, n_actions, len_list, fitting_method, base_model, description\",\n+ invalid_input_of_initializing_regression_models,\n+)\n+def test_initializing_regression_models_using_invalid_input_data(\n+ action_context: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ fitting_method: str,\n+ base_model: BaseEstimator,\n+ description: str,\n+) -> None:\n+ # initialization raises ValueError\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = RegressionModel(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ action_context=action_context,\n+ base_model=base_model,\n+ fitting_method=fitting_method,\n+ )\n+\n+\[email protected](\n+ \"context, action, reward, pscore, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, description\",\n+ invalid_input_of_fitting_regression_models,\n+)\n+def test_fitting_regression_models_using_invalid_input_data(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ action_context: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ fitting_method: str,\n+ base_model: BaseEstimator,\n+ action_dist: np.ndarray,\n+ n_folds: int,\n+ random_state: int,\n+ description: str,\n+) -> None:\n+ # fit_predict function raises ValueError\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ regression_model = RegressionModel(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ 
action_context=action_context,\n+ base_model=base_model,\n+ fitting_method=fitting_method,\n+ )\n+ if fitting_method == \"normal\":\n+ # train regression model on logged bandit feedback data\n+ _ = regression_model.fit_predict(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_dist=action_dist,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+ else:\n+ # train regression model on logged bandit feedback data\n+ _ = regression_model.fit_predict(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_dist=action_dist,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"context, action, reward, pscore, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, description\",\n+ valid_input_of_regression_models,\n+)\n+def test_regression_models_using_valid_input_data(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ position: np.ndarray,\n+ action_context: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ fitting_method: str,\n+ base_model: BaseEstimator,\n+ action_dist: np.ndarray,\n+ n_folds: int,\n+ random_state: int,\n+ description: str,\n+) -> None:\n+ # fit_predict\n+ regression_model = RegressionModel(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ action_context=action_context,\n+ base_model=base_model,\n+ fitting_method=fitting_method,\n+ )\n+ if fitting_method == \"normal\":\n+ # train regression model on logged bandit feedback data\n+ _ = regression_model.fit_predict(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_dist=action_dist,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+ else:\n+ # train regression model on logged bandit feedback data\n+ _ = regression_model.fit_predict(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ action_dist=action_dist,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+\n+\ndef test_performance_of_binary_outcome_models(\nfixed_synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
fix regression model testing
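The "valid input with cross fitting" case in this commit passes n_folds=3 and a Ridge base model configured through the new `ridge: alpha: 0.2` entry in hyperparams.yaml. Cross-fitting here means each round's reward is predicted by a model trained on the other folds, so the regression estimates fed into DR-style estimators are out-of-sample. A stripped-down sketch of the idea using sklearn's KFold (context-only, ignoring actions and positions; this is not RegressionModel.fit_predict itself):

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold

n_rounds, dim_context = 1000, 7
context = np.random.uniform(size=(n_rounds, dim_context))
reward = np.random.uniform(size=n_rounds)

estimated_rewards = np.zeros(n_rounds)
kf = KFold(n_splits=3, shuffle=True, random_state=1)
for train_idx, test_idx in kf.split(context):
    model = Ridge(alpha=0.2)
    model.fit(context[train_idx], reward[train_idx])
    # held-out predictions: the model never saw the rounds it scores
    estimated_rewards[test_idx] = model.predict(context[test_idx])
```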
641,006
07.02.2021 18:30:01
-32,400
eee41004287d342c237dc114cc8c3a1f7db96e66
add comment in invalid testing of regression models
[ { "change_type": "MODIFY", "old_path": "tests/ope/test_regression_models.py", "new_path": "tests/ope/test_regression_models.py", "diff": "@@ -37,7 +37,7 @@ len_list = 3\ninvalid_input_of_initializing_regression_models = [\n(\nnp.random.uniform(size=(n_actions, 8)),\n- \"a\",\n+ \"a\", #\nlen_list,\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\n@@ -45,7 +45,7 @@ invalid_input_of_initializing_regression_models = [\n),\n(\nnp.random.uniform(size=(n_actions, 8)),\n- 1,\n+ 1, #\nlen_list,\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\n@@ -54,7 +54,7 @@ invalid_input_of_initializing_regression_models = [\n(\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\n- \"a\",\n+ \"a\", #\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\n\"len_list must be a positive integer\",\n@@ -62,7 +62,7 @@ invalid_input_of_initializing_regression_models = [\n(\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\n- 0,\n+ 0, #\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\n\"len_list must be a positive integer\",\n@@ -71,7 +71,7 @@ invalid_input_of_initializing_regression_models = [\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\nlen_list,\n- 1,\n+ 1, #\nRidge(**hyperparams[\"ridge\"]),\n\"fitting_method must be one of\",\n),\n@@ -79,7 +79,7 @@ invalid_input_of_initializing_regression_models = [\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\nlen_list,\n- \"awesome\",\n+ \"awesome\", #\nRidge(**hyperparams[\"ridge\"]),\n\"fitting_method must be one of\",\n),\n@@ -88,7 +88,7 @@ invalid_input_of_initializing_regression_models = [\nn_actions,\nlen_list,\n\"normal\",\n- \"RandomForest\",\n+ \"RandomForest\", #\n\"base_model must be BaseEstimator or a child class of BaseEstimator\",\n),\n]\n@@ -98,7 +98,7 @@ invalid_input_of_initializing_regression_models = [\ninvalid_input_of_fitting_regression_models = [\n(\n- None,\n+ None, #\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n@@ -115,7 +115,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- None,\n+ None, #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -132,7 +132,7 @@ invalid_input_of_fitting_regression_models = [\n(\nnp.random.uniform(size=(n_rounds, 7)),\nnp.random.choice(range(n_actions), size=n_rounds),\n- None,\n+ None, #\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\nNone,\n@@ -146,7 +146,7 @@ invalid_input_of_fitting_regression_models = [\n\"reward must be ndarray\",\n),\n(\n- np.random.uniform(size=(n_rounds, 7, 3)),\n+ np.random.uniform(size=(n_rounds, 7, 3)), #\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n@@ -163,7 +163,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=(n_rounds, 3)),\n+ np.random.choice(range(n_actions), size=(n_rounds, 3)), #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -180,7 +180,7 @@ invalid_input_of_fitting_regression_models = [\n(\nnp.random.uniform(size=(n_rounds, 7)),\nnp.random.choice(range(n_actions), size=n_rounds),\n- np.random.uniform(size=(n_rounds, 3)),\n+ np.random.uniform(size=(n_rounds, 3)), #\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\nNone,\n@@ -195,7 +195,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- 
np.random.choice([\"1\", \"a\"], size=n_rounds),\n+ np.random.choice([\"1\", \"a\"], size=n_rounds), #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -211,7 +211,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice([-1, -3], size=n_rounds),\n+ np.random.choice([-1, -3], size=n_rounds), #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -229,7 +229,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.uniform(size=(n_rounds, 7)),\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\n- \"3\",\n+ \"3\", #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\n@@ -245,7 +245,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.uniform(size=(n_rounds, 7)),\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\n- np.ones((n_rounds, 2)) * 2,\n+ np.ones((n_rounds, 2)) * 2, #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\n@@ -261,7 +261,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.uniform(size=(n_rounds, 7)),\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\n- np.ones(n_rounds - 1) * 2,\n+ np.ones(n_rounds - 1) * 2, #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\n@@ -277,7 +277,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.uniform(size=(n_rounds, 7)),\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\n- np.arange(n_rounds),\n+ np.arange(n_rounds), #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\n@@ -294,7 +294,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- \"3\",\n+ \"3\", #\nNone,\nn_actions,\nlen_list,\n@@ -310,7 +310,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- np.random.choice(len_list, size=(n_rounds, 3)),\n+ np.random.choice(len_list, size=(n_rounds, 3)), #\nNone,\nn_actions,\nlen_list,\n@@ -326,7 +326,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- np.random.choice(len_list, size=n_rounds - 1),\n+ np.random.choice(len_list, size=n_rounds - 1), #\nNone,\nn_actions,\nlen_list,\n@@ -342,7 +342,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- np.random.choice([\"a\", \"1\"], size=n_rounds),\n+ np.random.choice([\"a\", \"1\"], size=n_rounds), #\nNone,\nn_actions,\nlen_list,\n@@ -358,7 +358,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.choice(range(n_actions), size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- np.random.choice([-1, -3], size=n_rounds),\n+ np.random.choice([-1, -3], size=n_rounds), #\nNone,\nn_actions,\nlen_list,\n@@ -371,7 +371,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds - 1),\n+ np.random.choice(range(n_actions), size=n_rounds - 1), #\nnp.random.uniform(size=n_rounds),\nNone,\nNone,\n@@ -387,7 +387,7 @@ 
invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds - 1),\n+ np.random.choice(range(n_actions), size=n_rounds - 1), #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nNone,\n@@ -407,7 +407,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n- \"3\",\n+ \"3\", #\nn_actions,\nlen_list,\n\"normal\",\n@@ -423,7 +423,7 @@ invalid_input_of_fitting_regression_models = [\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n- np.random.uniform(size=(n_actions, 8, 3)),\n+ np.random.uniform(size=(n_actions, 8, 3)), #\nn_actions,\nlen_list,\n\"normal\",\n@@ -435,7 +435,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- (np.arange(n_rounds) % n_actions) + 1,\n+ (np.arange(n_rounds) % n_actions) + 1, #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -454,7 +454,7 @@ invalid_input_of_fitting_regression_models = [\nnp.arange(n_rounds) % n_actions,\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- None,\n+ None, #\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\nlen_list,\n@@ -470,7 +470,7 @@ invalid_input_of_fitting_regression_models = [\nnp.arange(n_rounds) % n_actions,\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- np.ones(n_rounds, dtype=int) * len_list,\n+ np.ones(n_rounds, dtype=int) * len_list, #\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\nlen_list,\n@@ -492,7 +492,7 @@ invalid_input_of_fitting_regression_models = [\nlen_list,\n\"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ None, #\n3,\n1,\n\"when fitting_method is either\",\n@@ -508,7 +508,7 @@ invalid_input_of_fitting_regression_models = [\nlen_list,\n\"mrdr\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ None, #\n3,\n1,\n\"when fitting_method is either\",\n@@ -524,7 +524,7 @@ invalid_input_of_fitting_regression_models = [\nlen_list,\n\"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- np.zeros((n_rounds, n_actions, len_list - 1)),\n+ np.zeros((n_rounds, n_actions, len_list - 1)), #\n3,\n1,\n\"shape of action_dist must be (n_rounds, n_actions, len_list)\",\n@@ -540,7 +540,7 @@ invalid_input_of_fitting_regression_models = [\nlen_list,\n\"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- np.zeros((n_rounds, n_actions, len_list)),\n+ np.zeros((n_rounds, n_actions, len_list)), #\n3,\n1,\n\"action_dist must be a probability distribution\",\n@@ -557,7 +557,7 @@ invalid_input_of_fitting_regression_models = [\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\ngenerate_action_dist(n_rounds, n_actions, len_list),\n- 0,\n+ 0, #\nNone,\n\"n_folds must be a positive integer\",\n),\n@@ -573,7 +573,7 @@ invalid_input_of_fitting_regression_models = [\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\ngenerate_action_dist(n_rounds, n_actions, len_list),\n- \"a\",\n+ \"a\", #\nNone,\n\"n_folds must be a positive integer\",\n),\n@@ -590,7 +590,7 @@ invalid_input_of_fitting_regression_models = [\nRidge(**hyperparams[\"ridge\"]),\ngenerate_action_dist(n_rounds, n_actions, len_list),\n3,\n- \"a\",\n+ \"a\", #\n\"random_state must be an integer\",\n),\n(\n@@ -598,7 +598,7 @@ invalid_input_of_fitting_regression_models = [\nnp.arange(n_rounds) % n_actions,\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- np.zeros(n_rounds, dtype=int),\n+ np.zeros(n_rounds, 
dtype=int), #\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\nlen_list,\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
add comment in invalid testing of regression models
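The commit above annotates each parametrized invalid-input tuple with a trailing "#" so the offending argument is easy to spot. As a hedged illustration of that testing pattern — not code from the repository; ExampleRegressionModel and its error messages are hypothetical stand-ins for the actual obp.ope.RegressionModel API — a minimal self-contained sketch:

import re
import pytest


class ExampleRegressionModel:
    """Hypothetical stand-in that validates its arguments on construction."""

    def __init__(self, n_actions: int, len_list: int):
        if not isinstance(n_actions, int) or n_actions <= 1:
            raise ValueError("n_actions must be an integer larger than 1")
        if not isinstance(len_list, int) or len_list <= 0:
            raise ValueError("len_list must be a positive integer")
        self.n_actions = n_actions
        self.len_list = len_list


# each tuple is one invalid call; the trailing "#" comment flags the offending value
invalid_init_inputs = [
    ("a", 3, "n_actions must be an integer larger than 1"),  # n_actions is a str
    (5, 0, "len_list must be a positive integer"),  # len_list is not positive
]


@pytest.mark.parametrize("n_actions, len_list, description", invalid_init_inputs)
def test_invalid_initialization(n_actions, len_list, description):
    # the constructor must reject the flagged argument with the expected message
    with pytest.raises(ValueError, match=re.escape(description)):
        ExampleRegressionModel(n_actions=n_actions, len_list=len_list)

pytest collects one test case per tuple, and the trailing "#" markers keep the invalid value visible at a glance even when the tuples grow long, which appears to be the intent of the commit.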
641,003
31.01.2021 17:48:16
-32,400
c2efd73ac7af9b0721d485213794d67c88315f93
apply review feedback
[ { "change_type": "MODIFY", "old_path": "obp/dataset/real.py", "new_path": "obp/dataset/real.py", "diff": "@@ -42,7 +42,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\nReferences\n------------\nYuta Saito, Shunsuke Aihara, Megumi Matsutani, Yusuke Narita.\n- \"Large-scale Open Dataset, Pipeline, and Benchmark for Bandit Algorithms.\", 2020.\n+ \"Open Bandit Dataset and Pipeline: Towards Realistic and Reproducible Off-Policy Evaluation.\", 2020.\n\"\"\"\n@@ -53,15 +53,6 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Open Bandit Dataset Class.\"\"\"\n- # assert self.behavior_policy in [\n- # \"bts\",\n- # \"random\",\n- # ], f\"behavior_policy must be either of 'bts' or 'random', but {self.behavior_policy} is given\"\n- # assert self.campaign in [\n- # \"all\",\n- # \"men\",\n- # \"women\",\n- # ], f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\nif self.behavior_policy not in [\n\"bts\",\n\"random\",\n@@ -208,9 +199,10 @@ class OpenBanditDataset(BaseRealBanditDataset):\n\"\"\"\nif is_timeseries_split:\n- assert isinstance(test_size, float) & (\n- 0 < test_size < 1\n- ), f\"test_size must be a float in the (0,1) interval, but {test_size} is given\"\n+ if not isinstance(test_size, float) or (test_size <= 0 or test_size >= 1):\n+ raise ValueError(\n+ f\"test_size must be a float in the (0,1) interval, but {test_size} is given\"\n+ )\nn_rounds_train = np.int(self.n_rounds * (1.0 - test_size))\nreturn dict(\nn_rounds=n_rounds_train,\n" }, { "change_type": "DELETE", "old_path": "test/dataset/test_real.py", "new_path": null, "diff": "-import pytest\n-import numpy as np\n-import pandas as pd\n-\n-from obp.dataset import OpenBanditDataset\n-\n-\n-def test_real_init():\n- # behavior_policy\n- with pytest.raises(ValueError):\n- OpenBanditDataset(behavior_policy=\"aaa\", campaign=\"all\")\n-\n- # campaign\n- with pytest.raises(ValueError):\n- OpenBanditDataset(behavior_policy=\"random\", campaign=\"aaa\")\n-\n- # data_path\n- with pytest.raises(ValueError):\n- OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\", data_path=\"raw_str_path\")\n-\n- # load_raw_data\n- opd = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n- # check the value exists and has the right type\n- assert (\n- isinstance(opd.data, pd.DataFrame)\n- and isinstance(opd.item_context, pd.DataFrame)\n- and isinstance(opd.action, np.ndarray)\n- and isinstance(opd.position, np.ndarray)\n- and isinstance(opd.reward, np.ndarray)\n- and isinstance(opd.pscore, np.ndarray)\n- )\n-\n- # pre_process (context and action_context)\n- assert (\n- isinstance(opd.context, np.ndarray)\n- and isinstance(opd.action_context, np.ndarray)\n- )\n-\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tests/dataset/test_real.py", "diff": "+import pytest\n+import numpy as np\n+import pandas as pd\n+\n+from obp.dataset import OpenBanditDataset\n+\n+\n+def test_real_init():\n+ # behavior_policy\n+ with pytest.raises(ValueError):\n+ OpenBanditDataset(behavior_policy=\"aaa\", campaign=\"all\")\n+\n+ # campaign\n+ with pytest.raises(ValueError):\n+ OpenBanditDataset(behavior_policy=\"random\", campaign=\"aaa\")\n+\n+ # data_path\n+ with pytest.raises(ValueError):\n+ OpenBanditDataset(\n+ behavior_policy=\"random\", campaign=\"all\", data_path=\"raw_str_path\"\n+ )\n+\n+ # load_raw_data\n+ obd = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ # check the value exists and has the right type\n+ assert (\n+ 
isinstance(obd.data, pd.DataFrame)\n+ and isinstance(obd.item_context, pd.DataFrame)\n+ and isinstance(obd.action, np.ndarray)\n+ and isinstance(obd.position, np.ndarray)\n+ and isinstance(obd.reward, np.ndarray)\n+ and isinstance(obd.pscore, np.ndarray)\n+ )\n+\n+ # pre_process (context and action_context)\n+ assert isinstance(obd.context, np.ndarray) and isinstance(\n+ obd.action_context, np.ndarray\n+ )\n+\n+\n+def test_obtain_batch_bandit_feedback():\n+ # invalid test_size\n+ with pytest.raises(ValueError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.obtain_batch_bandit_feedback(is_timeseries_split=True, test_size=1.3)\n+\n+ with pytest.raises(ValueError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.obtain_batch_bandit_feedback(is_timeseries_split=True, test_size=-0.5)\n+\n+ # existence of keys\n+ # is_timeseries_split=False (default)\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback()\n+\n+ assert \"n_rounds\" in bandit_feedback.keys()\n+ assert \"n_actions\" in bandit_feedback.keys()\n+ assert \"action\" in bandit_feedback.keys()\n+ assert \"position\" in bandit_feedback.keys()\n+ assert \"reward\" in bandit_feedback.keys()\n+ assert \"pscore\" in bandit_feedback.keys()\n+ assert \"context\" in bandit_feedback.keys()\n+ assert \"action_context\" in bandit_feedback.keys()\n+\n+ # is_timeseries_split=True\n+ dataset2 = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ bandit_feedback2 = dataset2.obtain_batch_bandit_feedback(is_timeseries_split=True)\n+\n+ assert \"n_rounds\" in bandit_feedback2.keys()\n+ assert \"n_actions\" in bandit_feedback2.keys()\n+ assert \"action\" in bandit_feedback2.keys()\n+ assert \"action_test\" in bandit_feedback2.keys()\n+ assert \"position\" in bandit_feedback2.keys()\n+ assert \"position_test\" in bandit_feedback2.keys()\n+ assert \"reward\" in bandit_feedback2.keys()\n+ assert \"reward_test\" in bandit_feedback2.keys()\n+ assert \"pscore\" in bandit_feedback2.keys()\n+ assert \"pscore_test\" in bandit_feedback2.keys()\n+ assert \"context\" in bandit_feedback2.keys()\n+ assert \"context_test\" in bandit_feedback2.keys()\n+ assert \"action_context\" in bandit_feedback2.keys()\n+\n+\n+def test_calc_on_policy_policy_value_estimate():\n+ ground_truth_policy_value = OpenBanditDataset.calc_on_policy_policy_value_estimate(\n+ behavior_policy=\"random\", campaign=\"all\"\n+ )\n+ assert isinstance(ground_truth_policy_value, float)\n+\n+\n+def test_sample_bootstrap_bandit_feedback():\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback()\n+ bootstrap_bf = dataset.sample_bootstrap_bandit_feedback()\n+\n+ assert len(bandit_feedback[\"action\"]) == len(bootstrap_bf[\"action\"])\n+ assert len(bandit_feedback[\"position\"]) == len(bootstrap_bf[\"position\"])\n+ assert len(bandit_feedback[\"reward\"]) == len(bootstrap_bf[\"reward\"])\n+ assert len(bandit_feedback[\"pscore\"]) == len(bootstrap_bf[\"pscore\"])\n+ assert len(bandit_feedback[\"context\"]) == len(bootstrap_bf[\"context\"])\n" } ]
Python
Apache License 2.0
st-tech/zr-obp
apply reviewed points
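The second commit replaces an assert-based check on test_size with an explicit ValueError and moves the dataset tests to tests/dataset/. A hedged sketch of that validate-then-test pattern follows; split_train_test is a hypothetical helper used only for illustration and does not mirror the real OpenBanditDataset.obtain_batch_bandit_feedback signature:

import pytest


def split_train_test(n_rounds: int, test_size: float = 0.3) -> int:
    """Return the number of training rounds for a time-series split."""
    # explicit ValueError instead of assert, mirroring the refactor in the diff above
    if not isinstance(test_size, float) or not (0.0 < test_size < 1.0):
        raise ValueError(
            f"test_size must be a float in the (0,1) interval, but {test_size} is given"
        )
    return int(n_rounds * (1.0 - test_size))


def test_split_train_test_rejects_invalid_test_size():
    # out-of-range values must raise, as in the new tests/dataset/test_real.py
    with pytest.raises(ValueError):
        split_train_test(n_rounds=1000, test_size=1.3)
    with pytest.raises(ValueError):
        split_train_test(n_rounds=1000, test_size=-0.5)


def test_split_train_test_returns_expected_size():
    # 0.5 is exactly representable, so the expected count is unambiguous
    assert split_train_test(n_rounds=1000, test_size=0.5) == 500

Raising explicitly rather than asserting keeps the check active under python -O and gives callers a message they can match with pytest.raises, which is presumably why the reviewers asked for the change.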