author | date | timezone | hash | message | mods | language | license | repo | original_message
---|---|---|---|---|---|---|---|---|---
641,005 | 09.02.2021 02:55:44 | -32,400 | 53828f6a1c7f495ace812b94061c7e353e79cdc0 | implement sample_reward and calc_ground_truth_policy_value | [
{
"change_type": "MODIFY",
"old_path": "examples/online/README.md",
"new_path": "examples/online/README.md",
"diff": "@@ -27,6 +27,7 @@ python evaluate_off_policy_estimators.py\\\n--n_runs $n_runs\\\n--n_rounds $n_rounds\\\n--n_actions $n_actions\\\n+ --n_sim $n_sim\\\n--dim_context $dim_context\\\n--n_jobs $n_jobs\\\n--random_state $random_state\n@@ -34,6 +35,7 @@ python evaluate_off_policy_estimators.py\\\n- `$n_runs` specifies the number of simulation runs in the experiment to estimate standard deviations of the performance of OPE estimators.\n- `$n_rounds` and `$n_actions` specify the number of rounds (or samples) and the number of actions of the synthetic bandit data.\n- `$dim_context` specifies the dimension of context vectors.\n+- `$n_sim` specifeis the simulations in the Monte Carlo simulation to compute the ground-truth policy value.\n- `$evaluation_policy_name` specifeis the evaluation policy and should be one of \"bernoulli_ts\", \"epsilon_greedy\", \"lin_epsilon_greedy\", \"lin_ts, lin_ucb\", \"logistic_epsilon_greedy\", \"logistic_ts\", or \"logistic_ucb\".\n- `$n_jobs` is the maximum number of concurrently running jobs.\n@@ -42,10 +44,11 @@ For example, the following command compares the estimation performances (relativ\n```bash\npython evaluate_off_policy_estimators.py\\\n--n_runs 20\\\n- --n_rounds 100000\\\n+ --n_rounds 1000\\\n--n_actions 30\\\n--dim_context 5\\\n- --evaluation_policy_name bernoulli_ts\n+ --evaluation_policy_name bernoulli_ts\\\n+ --n_sim 3\\\n--n_jobs -1\\\n--random_state 12345\n@@ -54,7 +57,7 @@ python evaluate_off_policy_estimators.py\\\n# random_state=12345\n# ---------------------------------------------\n# mean std\n-# rm 0.010058 0.005635\n+# rm 0.202387 0.11685\n# =============================================\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/online/evaluate_off_policy_estimators.py",
"new_path": "examples/online/evaluate_off_policy_estimators.py",
"diff": "@@ -130,7 +130,7 @@ if __name__ == \"__main__\":\n# simulate the evaluation policy\naction_dist = run_bandit_simulation(bandit_feedback, evaluation_policy)\n# estimate the ground-truth policy values of the evaluation policy\n- # using the full expected reward contained in the test set of synthetic bandit feedback\n+ # by Monte-Carlo Simulation using p(r|x,a), the distribution of rewards\nground_truth_policy_value = calc_ground_truth_policy_value(\nbandit_feedback,\ndataset.sample_reward,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -184,13 +184,19 @@ class SyntheticBanditDataset(BaseBanditDataset):\nreturn expected_reward_\n- def sample_reward_from_mean(self, mean: np.ndarray) -> np.ndarray:\n- \"\"\"Sample reward given mean\"\"\"\n+ def sample_reward_given_expected_reward(\n+ self,\n+ expected_reward: np.ndarray,\n+ action: np.ndarray,\n+ ) -> np.ndarray:\n+ \"\"\"Sample reward given expected rewards\"\"\"\n+ expected_reward_factual = expected_reward[np.arange(action.shape[0]), action]\nif self.reward_type == \"binary\":\n- reward = self.random_.binomial(n=1, p=mean)\n+ reward = self.random_.binomial(n=1, p=expected_reward_factual)\nelif self.reward_type == \"continuous\":\n- a = (self._reward_min - mean) / self.reward_std\n- b = (self.reward_max_ - mean) / self.reward_std\n+ mean = expected_reward_factual\n+ a = (self.reward_min - mean) / self.reward_std\n+ b = (self.reward_max - mean) / self.reward_std\nreward = truncnorm.rvs(\na=a,\nb=b,\n@@ -214,14 +220,28 @@ class SyntheticBanditDataset(BaseBanditDataset):\nSelected actions to the contexts.\nReturns\n- -----------\n+ ---------\nreward: array-like, shape (n_rounds,)\nSampled rewards given contexts and actions.\n\"\"\"\n+ if not isinstance(context, np.ndarray):\n+ raise ValueError(\"context must be ndarray\")\n+ if not isinstance(action, np.ndarray):\n+ raise ValueError(\"action must be ndarray\")\n+ if context.ndim != 2:\n+ raise ValueError(f\"context must be 2-dimensional, but is {context.ndim}.\")\n+ if action.ndim != 1:\n+ raise ValueError(f\"action must be 1-dimensional, but is {action.ndim}.\")\n+ if context.shape[0] != action.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of context must be the same as that of action\"\n+ )\n+ if not np.issubdtype(int, action.dtype):\n+ raise ValueError(\"the dtype of action must be a subdtype of int\")\n+\nexpected_reward_ = self.calc_expected_reward(context)\n- expected_reward_factual = expected_reward_[np.arange(action.shape[0]), action]\n- return self.sample_reward_from_mean(expected_reward_factual)\n+ return self.sample_reward_given_expected_reward(expected_reward_, action)\ndef obtain_batch_bandit_feedback(self, n_rounds: int) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -267,13 +287,12 @@ class SyntheticBanditDataset(BaseBanditDataset):\npscore = behavior_policy_[np.arange(n_rounds), action]\nexpected_reward_ = self.calc_expected_reward(context)\n- expected_reward_factual = expected_reward_[np.arange(n_rounds), action]\n- reward = self.sample_reward_from_mean(expected_reward_factual)\n+ reward = self.sample_reward_given_expected_reward(expected_reward_, action)\nif self.reward_type == \"continuous\":\n# correct expected_reward_, as we use truncated normal distribution here\n- mean = expected_reward_factual\n- a = (self._reward_min - mean) / self.reward_std\n- b = (self.reward_max_ - mean) / self.reward_std\n+ mean = expected_reward_\n+ a = (self.reward_min - mean) / self.reward_std\n+ b = (self.reward_max - mean) / self.reward_std\nexpected_reward_ = truncnorm.stats(\na=a, b=b, loc=mean, scale=self.reward_std, moments=\"m\"\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "@@ -92,27 +92,24 @@ def calc_ground_truth_policy_value(\nParameters\n----------\n- reward_type: str, default='binary'\n- Type of reward variable, which must be either 'binary' or 'continuous'.\n- When 'binary' is given, rewards are sampled from the Bernoulli distribution.\n- When 'continuous' is given, rewards are sampled from the truncated Normal distribution with `scale=1`.\n- The mean parameter of the reward distribution is determined by the `expected_reward` in the next argument.\n-\nbandit_feedback: BanditFeedback\nLogged bandit feedback data used in offline bandit simulation.\nIt must contain \"expected_rewards\".\n+ reward_sampler: Callable[[np.ndarray, np.ndarray], np.ndarray]\n+ Function sampling reward for each given action-context pair,\n+ i.e., :math:`p(r \\\\mid x, a)`.\n+\npolicy: BanditPolicy\nOnline bandit policy evaluated in offline bandit simulation (i.e., evaluation policy).\nn_sim: int, default=100\n- Number of simulations in the Monte Carlo simulation to compute the policy value\n-\n+ Number of simulations in the Monte Carlo simulation to compute the policy value.\nReturns\n--------\nground_truth_policy_value: float\n- policy value of a given action distribution (mostly evaluation policy).\n+ policy value of a given evaluation policy.\n\"\"\"\nfor key_ in [\n\"action\",\n@@ -132,10 +129,10 @@ def calc_ground_truth_policy_value(\n)\ncumulative_reward = 0.0\n+ dim_context = bandit_feedback[\"context\"].shape[1]\nfor _ in tqdm(np.arange(n_sim), total=n_sim):\npolicy_ = deepcopy(policy)\n- dim_context = bandit_feedback[\"context\"].shape[1]\nfor position_, context_, expected_reward_ in zip(\nbandit_feedback[\"position\"],\nbandit_feedback[\"context\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -43,6 +43,64 @@ def test_synthetic_init():\nassert np.allclose(dataset.action_context, ohe)\n+# context, action, description\n+invalid_input_of_sample_reward = [\n+ (\"3\", np.ones(2, dtype=int), \"context must be ndarray\"),\n+ (None, np.ones(2, dtype=int), \"context must be ndarray\"),\n+ (np.ones((2, 3)), \"3\", \"action must be ndarray\"),\n+ (np.ones((2, 3)), None, \"action must be ndarray\"),\n+ (\n+ np.ones((2, 3)),\n+ np.ones(2, dtype=np.float32),\n+ \"the dtype of action must be a subdtype of int\",\n+ ),\n+ (np.ones(2), np.ones(2, dtype=int), \"context must be 2-dimensional, but is 1.\"),\n+ (\n+ np.ones((2, 3)),\n+ np.ones((2, 3), dtype=int),\n+ \"action must be 1-dimensional, but is 2.\",\n+ ),\n+ (\n+ np.ones((2, 3)),\n+ np.ones(3, dtype=int),\n+ \"the size of axis 0 of context must be the same as that of action\",\n+ ),\n+]\n+\n+valid_input_of_sample_reward = [\n+ (\n+ np.ones((2, 3)),\n+ np.ones(2, dtype=int),\n+ \"valid shape\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"context, action, description\",\n+ invalid_input_of_sample_reward,\n+)\n+def test_synthetic_sample_reward_using_invalid_inputs(context, action, description):\n+ n_actions = 10\n+ dataset = SyntheticBanditDataset(n_actions=n_actions)\n+\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dataset.sample_reward(context=context, action=action)\n+\n+\[email protected](\n+ \"context, action, description\",\n+ valid_input_of_sample_reward,\n+)\n+def test_synthetic_sample_reward_using_valid_inputs(context, action, description):\n+ n_actions = 10\n+ dataset = SyntheticBanditDataset(n_actions=n_actions, dim_context=3)\n+\n+ reward = dataset.sample_reward(context=context, action=action)\n+ assert isinstance(reward, np.ndarray), \"Invalid response of sample_reward\"\n+ assert reward.shape == action.shape, \"Invalid response of sample_reward\"\n+\n+\ndef test_synthetic_obtain_batch_bandit_feedback():\n# n_rounds\nwith pytest.raises(ValueError):\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement sample_reward and calc_ground_truth_policy_value |
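The diff above adds `sample_reward_given_expected_reward`, which draws binary rewards from a Bernoulli distribution and continuous rewards from a truncated normal centered at the factual expected reward. Below is a minimal standalone sketch of that sampling logic; the plain arguments `reward_min`, `reward_max`, and `reward_std` stand in for the dataset class attributes and use illustrative defaults, not obp's exact values.

```python
import numpy as np
from scipy.stats import truncnorm

# Standalone sketch of the reward sampler added in this commit (not the obp method itself).
def sample_reward_given_expected_reward(
    expected_reward: np.ndarray,  # shape (n_rounds, n_actions)
    action: np.ndarray,           # shape (n_rounds,), int-valued chosen actions
    reward_type: str = "binary",
    reward_min: float = 0.0,
    reward_max: float = 1e10,
    reward_std: float = 1.0,
    random_state: int = 12345,
) -> np.ndarray:
    random_ = np.random.RandomState(random_state)
    # expected reward of the factually chosen action in each round
    expected_reward_factual = expected_reward[np.arange(action.shape[0]), action]
    if reward_type == "binary":
        return random_.binomial(n=1, p=expected_reward_factual)
    # continuous rewards: normal distribution truncated to [reward_min, reward_max]
    a = (reward_min - expected_reward_factual) / reward_std
    b = (reward_max - expected_reward_factual) / reward_std
    return truncnorm.rvs(
        a=a, b=b, loc=expected_reward_factual, scale=reward_std, random_state=random_
    )

expected_reward = np.random.RandomState(0).uniform(size=(5, 3))
action = np.array([0, 2, 1, 0, 2])
print(sample_reward_given_expected_reward(expected_reward, action))
```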
641,006 | 20.02.2021 22:46:47 | -32,400 | a0a58f72a1e69d7d2c4c43146b85bde4f5209dd4 | apply review and fix minor tests | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/regression_model.py",
"new_path": "obp/ope/regression_model.py",
"diff": "@@ -144,16 +144,16 @@ class RegressionModel(BaseEstimator):\n)\nif position.max() >= self.len_list:\nraise ValueError(\n- f\"position elements must be smaller than len_list, but {position.max()}\"\n+ f\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n)\nif self.fitting_method in [\"iw\", \"mrdr\"]:\nif not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):\nraise ValueError(\n- \"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n+ \"when fitting_method is either 'iw' or 'mrdr', action_dist (a 3-dimensional ndarray) must be given\"\n)\nif action_dist.shape != (n_rounds, self.n_actions, self.len_list):\nraise ValueError(\n- f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n+ f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list}), but is {action_dist.shape}\"\n)\nif not np.allclose(action_dist.sum(axis=1), 1):\nraise ValueError(\"action_dist must be a probability distribution\")\n@@ -316,16 +316,16 @@ class RegressionModel(BaseEstimator):\n)\nif position.max() >= self.len_list:\nraise ValueError(\n- f\"position elements must be smaller than len_list, but {position.max()}\"\n+ f\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n)\nif self.fitting_method in [\"iw\", \"mrdr\"]:\nif not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):\nraise ValueError(\n- \"when fitting_method is either 'iw' or 'mrdr', action_dist must be a 3-dimensional ndarray\"\n+ \"when fitting_method is either 'iw' or 'mrdr', action_dist (a 3-dimensional ndarray) must be given\"\n)\nif action_dist.shape != (n_rounds, self.n_actions, self.len_list):\nraise ValueError(\n- f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list})\"\n+ f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list}), but is {action_dist.shape}\"\n)\nif pscore is None:\npscore = np.ones_like(action) / self.n_actions\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -170,7 +170,7 @@ def check_bandit_feedback_inputs(\nposition: Optional[np.ndarray] = None,\npscore: Optional[np.ndarray] = None,\naction_context: Optional[np.ndarray] = None,\n-) -> Optional[AssertionError]:\n+) -> Optional[ValueError]:\n\"\"\"Check inputs for bandit learning or simulation.\nParameters\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_regression_models.py",
"new_path": "tests/ope/test_regression_models.py",
"diff": "@@ -30,7 +30,7 @@ with open(cd_path / \"hyperparams.yaml\", \"rb\") as f:\n# action_context, n_actions, len_list, fitting_method, base_model, description\n-n_rounds = 10\n+n_rounds = 1000\nn_actions = 3\nlen_list = 3\n@@ -99,7 +99,7 @@ invalid_input_of_initializing_regression_models = [\ninvalid_input_of_fitting_regression_models = [\n(\nNone, #\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -131,7 +131,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nNone, #\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -147,7 +147,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7, 3)), #\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -163,7 +163,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=(n_rounds, 3)), #\n+ np.random.choice(n_actions, size=(n_rounds, 3)), #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -179,7 +179,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=(n_rounds, 3)), #\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds),\n@@ -227,71 +227,71 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\n\"3\", #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\nlen_list,\n- \"normal\",\n+ \"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ generate_action_dist(n_rounds, n_actions, len_list),\n3,\n1,\n\"pscore must be ndarray\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones((n_rounds, 2)) * 2, #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\nlen_list,\n- \"normal\",\n+ \"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ generate_action_dist(n_rounds, n_actions, len_list),\n3,\n1,\n\"pscore must be 1-dimensional\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds - 1) * 2, #\nnp.random.choice(len_list, size=n_rounds),\nNone,\nn_actions,\nlen_list,\n- \"normal\",\n+ \"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ generate_action_dist(n_rounds, n_actions, len_list),\n3,\n1,\n\"context, action, reward, and pscore must be the same size.\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.arange(n_rounds), #\nnp.random.choice(len_list, 
size=n_rounds),\nNone,\nn_actions,\nlen_list,\n- \"normal\",\n+ \"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ generate_action_dist(n_rounds, n_actions, len_list),\n3,\n1,\n\"pscore must be positive\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n\"3\", #\n@@ -307,7 +307,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=(n_rounds, 3)), #\n@@ -323,7 +323,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice(len_list, size=n_rounds - 1), #\n@@ -339,7 +339,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice([\"a\", \"1\"], size=n_rounds), #\n@@ -355,7 +355,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds),\n+ np.random.choice(n_actions, size=n_rounds),\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nnp.random.choice([-1, -3], size=n_rounds), #\n@@ -371,7 +371,7 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds - 1), #\n+ np.random.choice(n_actions, size=n_rounds - 1), #\nnp.random.uniform(size=n_rounds),\nNone,\nNone,\n@@ -387,16 +387,16 @@ invalid_input_of_fitting_regression_models = [\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n- np.random.choice(range(n_actions), size=n_rounds - 1), #\n+ np.random.choice(n_actions, size=n_rounds - 1), #\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\nNone,\nNone,\nn_actions,\nlen_list,\n- \"normal\",\n+ \"iw\",\nRidge(**hyperparams[\"ridge\"]),\n- None,\n+ generate_action_dist(n_rounds, n_actions, len_list),\n3,\n1,\n\"context, action, reward, and pscore must be the same size\",\n@@ -645,6 +645,38 @@ valid_input_of_regression_models = [\n1,\n\"valid input without cross fitting\",\n),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ None,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"normal\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ None,\n+ 1,\n+ 1,\n+ \"valid input without pscore and action_dist\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.uniform(size=n_rounds),\n+ np.ones(n_rounds) * 2,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"iw\",\n+ Ridge(**hyperparams[\"ridge\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 1,\n+ 1,\n+ \"valid input when fitting_method is iw\",\n+ ),\n]\n@@ -706,9 +738,7 @@ def 
test_fitting_regression_models_using_invalid_input_data(\ncontext=context,\naction=action,\nreward=reward,\n- pscore=pscore,\nposition=position,\n- action_dist=action_dist,\nn_folds=n_folds,\nrandom_state=random_state,\n)\n@@ -760,9 +790,7 @@ def test_regression_models_using_valid_input_data(\ncontext=context,\naction=action,\nreward=reward,\n- pscore=pscore,\nposition=position,\n- action_dist=action_dist,\nn_folds=n_folds,\nrandom_state=random_state,\n)\n@@ -805,7 +833,7 @@ def test_performance_of_binary_outcome_models(\nfor model_name, model in binary_model_dict.items():\nregression_model = RegressionModel(\nn_actions=bandit_feedback[\"n_actions\"],\n- len_list=bandit_feedback[\"position\"].ndim,\n+ len_list=int(bandit_feedback[\"position\"].max() + 1),\naction_context=bandit_feedback[\"action_context\"],\nbase_model=model(**hyperparams[model_name]),\nfitting_method=fit_method,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | apply review and fix minor tests |
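The review changes above mostly sharpen `RegressionModel`'s validation messages so they report the offending value alongside the constraint. A hedged, self-contained sketch of that style of `action_dist` check, with a hypothetical `check_action_dist` helper rather than the library's own code:

```python
import numpy as np

# Validation in the spirit of the updated error messages: state what was expected and what was seen.
def check_action_dist(action_dist, n_rounds: int, n_actions: int, len_list: int) -> None:
    if not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):
        raise ValueError("action_dist (a 3-dimensional ndarray) must be given")
    if action_dist.shape != (n_rounds, n_actions, len_list):
        raise ValueError(
            "shape of action_dist must be (n_rounds, n_actions, len_list)"
            f"=({n_rounds}, {n_actions}, {len_list}), but is {action_dist.shape}"
        )
    if not np.allclose(action_dist.sum(axis=1), 1):
        raise ValueError("action_dist must be a probability distribution")

# uniform policy over 3 actions and a single slot passes every check
check_action_dist(np.full((10, 3, 1), 1 / 3), n_rounds=10, n_actions=3, len_list=1)
```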
641,006 | 20.02.2021 23:14:22 | -32,400 | 41e6e488fffa2827fa88ae741b31e5f816911088 | remove switch-ipw | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -9,7 +9,6 @@ __all_estimators__ = [\n\"DirectMethod\",\n\"DoublyRobust\",\n\"DoublyRobustWithShrinkage\",\n- \"SwitchInverseProbabilityWeighting\",\n\"SwitchDoublyRobust\",\n\"SelfNormalizedDoublyRobust\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_all_estimators.py",
"new_path": "tests/ope/test_all_estimators.py",
"diff": "@@ -247,7 +247,7 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n)\n- # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ # test all estimators\nall_estimators = ope.__all_estimators__\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n@@ -288,7 +288,7 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nexpected_reward = np.expand_dims(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n)\n- # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ # test all estimators\nall_estimators = ope.__all_estimators__\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n@@ -340,12 +340,12 @@ def test_performance_of_ope_estimators_using_random_evaluation_policy(\n# compute statistics of ground truth policy value\ngt_mean = q_pi_e.mean()\ngt_std = q_pi_e.std(ddof=1)\n- # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ # test most of the estimators (ReplayMethod is not tested because it is out of scope)\nall_estimators = ope.__all_estimators__\nestimators = [\ngetattr(ope.estimators, estimator_name)()\nfor estimator_name in all_estimators\n- if estimator_name not in [\"ReplayMethod\", \"SwitchInverseProbabilityWeighting\"]\n+ if estimator_name not in [\"ReplayMethod\"]\n]\n# conduct OPE\nope_instance = OffPolicyEvaluation(\n@@ -377,7 +377,7 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\nsynthetic_bandit_feedback[\"expected_reward\"], axis=-1\n)\naction_dist = random_action_dist\n- # test most of the estimators (ReplayMethod is not tested because it is out of scope; Switch-ipw(\\tau=1) is not tested because it is known to be biased in this situation)\n+ # test all estimators\nall_estimators = ope.__all_estimators__\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -5,29 +5,24 @@ import numpy as np\nfrom obp.types import BanditFeedback\nfrom obp.ope import (\n- InverseProbabilityWeighting,\nDirectMethod,\nDoublyRobust,\nDoublyRobustWithShrinkage,\n- SwitchInverseProbabilityWeighting,\nSwitchDoublyRobust,\nSelfNormalizedDoublyRobust,\n)\nfrom conftest import generate_action_dist\n# prepare instances\n-ipw = InverseProbabilityWeighting()\ndm = DirectMethod()\ndr = DoublyRobust()\ndr_shrink_0 = DoublyRobustWithShrinkage(lambda_=0.0)\ndr_shrink_max = DoublyRobustWithShrinkage(lambda_=1e10)\nsndr = SelfNormalizedDoublyRobust()\n-switch_ipw_0 = SwitchInverseProbabilityWeighting(tau=0.0)\n-switch_ipw_max = SwitchInverseProbabilityWeighting(tau=1e10)\nswitch_dr_0 = SwitchDoublyRobust(tau=0.0)\nswitch_dr_max = SwitchDoublyRobust(tau=1e10)\n-dr_estimators = [dr, dr_shrink_0, sndr, switch_ipw_0, switch_dr_0]\n+dr_estimators = [dr, dr_shrink_0, sndr, switch_dr_0]\n# dr and self-normalized dr\n@@ -233,7 +228,7 @@ def test_dr_using_invalid_input_data(\n)\n-# switch-ipw and switch-dr\n+# switch-dr\ninvalid_input_of_switch = [\n(\"a\", \"switching hyperparameter must be float\"),\n@@ -249,9 +244,6 @@ def test_switch_using_invalid_input_data(tau: float, description: str) -> None:\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n_ = SwitchDoublyRobust(tau=tau)\n- with pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = SwitchInverseProbabilityWeighting(tau=tau)\n-\n# dr-os\ninvalid_input_of_shrinkage = [\n@@ -300,9 +292,8 @@ def test_dr_variants_using_valid_input_data(\n) -> None:\n# check dr variants\nswitch_dr = SwitchDoublyRobust(tau=hyperparameter)\n- switch_ipw = SwitchInverseProbabilityWeighting(tau=hyperparameter)\ndr_os = DoublyRobustWithShrinkage(lambda_=hyperparameter)\n- for estimator in [switch_dr, switch_ipw, dr_os]:\n+ for estimator in [switch_dr, dr_os]:\nest = estimator.estimate_policy_value(\naction_dist=action_dist,\naction=action,\n@@ -409,36 +400,6 @@ def test_dr_shrinkage_using_random_evaluation_policy(\n), \"DoublyRobustWithShrinkage (lambda=inf) should be almost the same as DoublyRobust\"\n-def test_switch_ipw_using_random_evaluation_policy(\n- synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n-) -> None:\n- \"\"\"\n- Test the switch_ipw estimators using synthetic bandit data and random evaluation policy\n- \"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n- action_dist = random_action_dist\n- # prepare input dict\n- input_dict = {\n- k: v\n- for k, v in synthetic_bandit_feedback.items()\n- if k in [\"reward\", \"action\", \"pscore\", \"position\"]\n- }\n- input_dict[\"action_dist\"] = action_dist\n- input_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n- dm_value = dm.estimate_policy_value(**input_dict)\n- ipw_value = ipw.estimate_policy_value(**input_dict)\n- switch_ipw_0_value = switch_ipw_0.estimate_policy_value(**input_dict)\n- switch_ipw_max_value = switch_ipw_max.estimate_policy_value(**input_dict)\n- assert (\n- dm_value == switch_ipw_0_value\n- ), \"SwitchIPW (tau=0) should be the same as DirectMethod\"\n- assert (\n- ipw_value == switch_ipw_max_value\n- ), \"SwitchIPW (tau=1e10) should be the same as IPW\"\n-\n-\ndef test_switch_dr_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | remove switch-ipw |
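The updated tests build every remaining estimator dynamically from `obp.ope.__all_estimators__` instead of listing classes by hand. The snippet below reproduces that lookup pattern; it assumes obp is installed and that each estimator exposes an `estimator_name` attribute, as the library's estimator dataclasses do.

```python
# Dynamic-instantiation pattern used in the updated tests: look up estimator classes
# by name on the obp.ope.estimators module and instantiate them with default arguments.
import obp.ope as ope

estimators = [
    getattr(ope.estimators, name)()
    for name in ope.__all_estimators__
    if name not in ["ReplayMethod"]  # ReplayMethod stays excluded from the accuracy test
]
print([est.estimator_name for est in estimators])
```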
641,006 | 20.02.2021 23:52:37 | -32,400 | 72a634db7d699d0ed84dadd7d27c4b35ba03a705 | add relative estimated policy value when summarizing ope | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "@@ -257,8 +257,18 @@ class OffPolicyEvaluation:\nrandom_state=random_state,\n)\n)\n-\n- return policy_value_df.T, policy_value_interval_df.T\n+ policy_value_of_behavior_policy = self.bandit_feedback[\"reward\"].mean()\n+ policy_value_df = policy_value_df.T\n+ if policy_value_of_behavior_policy <= 0:\n+ logger.warning(\n+ f\"Policy value of the behavior policy is {policy_value_of_behavior_policy} (<=0); relative estimated policy value is set to np.nan\"\n+ )\n+ policy_value_df[\"relative_estimated_policy_value\"] = np.nan\n+ else:\n+ policy_value_df[\"relative_estimated_policy_value\"] = (\n+ policy_value_df.estimated_policy_value / policy_value_of_behavior_policy\n+ )\n+ return policy_value_df, policy_value_interval_df.T\ndef visualize_off_policy_estimates(\nself,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta.py",
"new_path": "tests/ope/test_meta.py",
"diff": "from typing import Dict, Optional\nfrom dataclasses import dataclass\nimport itertools\n+from copy import deepcopy\nimport pytest\nimport numpy as np\n@@ -557,6 +558,10 @@ def test_meta_summarize_off_policy_estimates(\n},\nindex=[\"estimated_policy_value\"],\n).T\n+ expected_value[\"relative_estimated_policy_value\"] = (\n+ expected_value[\"estimated_policy_value\"]\n+ / synthetic_bandit_feedback[\"reward\"].mean()\n+ )\nexpected_interval = pd.DataFrame(\n{\n\"ipw\": {k: v + ipw.eps for k, v in mock_confidence_interval.items()},\n@@ -565,6 +570,30 @@ def test_meta_summarize_off_policy_estimates(\n).T\nassert_frame_equal(value, expected_value), \"Invalid summarization (policy value)\"\nassert_frame_equal(interval, expected_interval), \"Invalid summarization (interval)\"\n+ # check relative estimated policy value when the average of bandit_feedback[\"reward\"] is zero\n+ zero_reward_bandit_feedback = deepcopy(synthetic_bandit_feedback)\n+ zero_reward_bandit_feedback[\"reward\"] = np.zeros(\n+ zero_reward_bandit_feedback[\"reward\"].shape[0]\n+ )\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=zero_reward_bandit_feedback, ope_estimators=[ipw, ipw3]\n+ )\n+ value, _ = ope_.summarize_off_policy_estimates(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ expected_value = pd.DataFrame(\n+ {\n+ \"ipw\": mock_policy_value + ipw.eps,\n+ \"ipw3\": mock_policy_value + ipw3.eps,\n+ },\n+ index=[\"estimated_policy_value\"],\n+ ).T\n+ expected_value[\"relative_estimated_policy_value\"] = np.nan\n+ assert_frame_equal(value, expected_value), \"Invalid summarization (policy value)\"\ninvalid_input_of_evaluation_performance_of_estimators = [\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add relative estimated policy value when summarizing ope |
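The change above appends a `relative_estimated_policy_value` column, defined as the estimated value divided by the behavior policy's empirical value (the mean observed reward), set to NaN when that baseline is not positive. A self-contained sketch of the same logic with a hypothetical `summarize` helper:

```python
import numpy as np
import pandas as pd

# Summary logic mirroring the diff: estimated policy values relative to the behavior policy's
# empirical value, with NaN when the baseline is non-positive.
def summarize(estimated_policy_values: dict, observed_rewards: np.ndarray) -> pd.DataFrame:
    df = pd.DataFrame(estimated_policy_values, index=["estimated_policy_value"]).T
    behavior_value = observed_rewards.mean()
    if behavior_value <= 0:
        df["relative_estimated_policy_value"] = np.nan
    else:
        df["relative_estimated_policy_value"] = df.estimated_policy_value / behavior_value
    return df

print(summarize({"ipw": 0.051, "dm": 0.047}, observed_rewards=np.array([0, 1, 0, 0, 1])))
```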
641,003 | 21.02.2021 14:38:33 | -32,400 | ed49ea64ac02c7e2ff7a0e1618dc51f385cb0d8a | split output of obtain_batch_bandit_feedback when is_timeseries_split is True | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/real.py",
"new_path": "obp/dataset/real.py",
"diff": "@@ -6,6 +6,8 @@ from dataclasses import dataclass\nfrom logging import getLogger, basicConfig, INFO\nfrom pathlib import Path\nfrom typing import Optional\n+from typing import Union\n+from typing import Tuple\nimport numpy as np\nimport pandas as pd\n@@ -191,7 +193,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndef obtain_batch_bandit_feedback(\nself, test_size: float = 0.3, is_timeseries_split: bool = False\n- ) -> BanditFeedback:\n+ ) -> Union[BanditFeedback, Tuple[BanditFeedback, BanditFeedback]]:\n\"\"\"Obtain batch logged bandit feedback.\nParameters\n@@ -225,21 +227,26 @@ class OpenBanditDataset(BaseRealBanditDataset):\nf\"test_size must be a float in the (0,1) interval, but {test_size} is given\"\n)\nn_rounds_train = np.int(self.n_rounds * (1.0 - test_size))\n- return dict(\n+ bandit_feedback_train = dict(\nn_rounds=n_rounds_train,\nn_actions=self.n_actions,\naction=self.action[:n_rounds_train],\n- action_test=self.action[n_rounds_train:],\nposition=self.position[:n_rounds_train],\n- position_test=self.position[n_rounds_train:],\nreward=self.reward[:n_rounds_train],\n- reward_test=self.reward[n_rounds_train:],\npscore=self.pscore[:n_rounds_train],\n- pscore_test=self.pscore[n_rounds_train:],\ncontext=self.context[:n_rounds_train],\n- context_test=self.context[n_rounds_train:],\naction_context=self.action_context,\n)\n+ bandit_feedback_test = dict(\n+ n_actions=self.n_actions,\n+ action=self.action[n_rounds_train:],\n+ position=self.position[n_rounds_train:],\n+ reward=self.reward[n_rounds_train:],\n+ pscore=self.pscore[n_rounds_train:],\n+ context=self.context[n_rounds_train:],\n+ action_context=self.action_context,\n+ )\n+ return bandit_feedback_train, bandit_feedback_test\nelse:\nreturn dict(\nn_rounds=self.n_rounds,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | split output of obtain_batch_bandit_feedback when is_timeseries_split is True |
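After this commit, `obtain_batch_bandit_feedback(is_timeseries_split=True)` returns a `(train, test)` pair of dicts sharing the same keys, instead of one dict carrying `*_test` fields. The sketch below mimics that convention on synthetic arrays; `split_bandit_feedback` is a stand-in, not the OpenBanditDataset method.

```python
import numpy as np

# Temporal split of a bandit feedback dict into (train, test) dicts with identical keys.
def split_bandit_feedback(feedback: dict, test_size: float = 0.3) -> tuple:
    n_total = feedback["action"].shape[0]
    n_rounds_train = int(n_total * (1.0 - test_size))
    per_round_keys = ["action", "position", "reward", "pscore", "context"]
    train = {k: feedback[k][:n_rounds_train] for k in per_round_keys}
    test = {k: feedback[k][n_rounds_train:] for k in per_round_keys}
    for d, n in ((train, n_rounds_train), (test, n_total - n_rounds_train)):
        d["n_rounds"] = n
        d["n_actions"] = feedback["n_actions"]
        d["action_context"] = feedback["action_context"]
    return train, test

n = 10
feedback = dict(
    n_actions=3,
    action=np.arange(n) % 3,
    position=np.zeros(n, dtype=int),
    reward=np.random.RandomState(0).binomial(1, 0.5, size=n),
    pscore=np.full(n, 1 / 3),
    context=np.random.RandomState(1).normal(size=(n, 4)),
    action_context=np.eye(3),
)
train, test = split_bandit_feedback(feedback)
print(train["n_rounds"], test["n_rounds"])
```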
641,003 | 21.02.2021 14:50:37 | -32,400 | b0700f0857dddfb6b81be1edb5adfa2db58b7bd9 | fix bandit feedback test | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_real.py",
"new_path": "tests/dataset/test_real.py",
"diff": "@@ -2,6 +2,8 @@ import pytest\nimport numpy as np\nimport pandas as pd\n+from typing import Tuple\n+\nfrom obp.dataset import OpenBanditDataset\n@@ -63,22 +65,26 @@ def test_obtain_batch_bandit_feedback():\nassert \"action_context\" in bandit_feedback.keys()\n# is_timeseries_split=True\n- dataset2 = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n- bandit_feedback2 = dataset2.obtain_batch_bandit_feedback(is_timeseries_split=True)\n-\n- assert \"n_rounds\" in bandit_feedback2.keys()\n- assert \"n_actions\" in bandit_feedback2.keys()\n- assert \"action\" in bandit_feedback2.keys()\n- assert \"action_test\" in bandit_feedback2.keys()\n- assert \"position\" in bandit_feedback2.keys()\n- assert \"position_test\" in bandit_feedback2.keys()\n- assert \"reward\" in bandit_feedback2.keys()\n- assert \"reward_test\" in bandit_feedback2.keys()\n- assert \"pscore\" in bandit_feedback2.keys()\n- assert \"pscore_test\" in bandit_feedback2.keys()\n- assert \"context\" in bandit_feedback2.keys()\n- assert \"context_test\" in bandit_feedback2.keys()\n- assert \"action_context\" in bandit_feedback2.keys()\n+ bandit_feedback_timeseries = dataset.obtain_batch_bandit_feedback(\n+ is_timeseries_split=True\n+ )\n+ assert isinstance(bandit_feedback_timeseries, Tuple)\n+ bandit_feedback_train = bandit_feedback_timeseries[0]\n+ bandit_feedback_test = bandit_feedback_timeseries[1]\n+\n+ bf_train_elems = {\n+ \"n_rounds\",\n+ \"n_actions\",\n+ \"action\",\n+ \"position\",\n+ \"reward\",\n+ \"pscore\",\n+ \"context\",\n+ \"action_context\",\n+ }\n+ bf_test_elems = bf_train_elems - {\"n_rounds\"}\n+ assert all(k in bandit_feedback_train.keys() for k in bf_train_elems)\n+ assert all(k in bandit_feedback_test.keys() for k in bf_test_elems)\ndef test_calc_on_policy_policy_value_estimate():\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix bandit feedback test |
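The rewritten test checks both returned dicts against a single set of required keys rather than asserting key by key. A tiny self-contained version of that idea, with a hypothetical `has_required_keys` helper:

```python
# Required fields of a bandit feedback dict, checked in one pass with a set.
required_keys = {
    "n_rounds", "n_actions", "action", "position",
    "reward", "pscore", "context", "action_context",
}

def has_required_keys(bandit_feedback: dict) -> bool:
    # True when every required field is present (extra fields are allowed)
    return required_keys.issubset(bandit_feedback.keys())

dummy_feedback = {k: None for k in required_keys}
assert has_required_keys(dummy_feedback)
assert not has_required_keys({"action": None, "reward": None})
```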
641,014 | 21.02.2021 16:53:59 | -32,400 | a3d171b579e347b2aa04fb616ceea8a865cb7ae3 | set None to position array of synthetic and classification data | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/multiclass.py",
"new_path": "obp/dataset/multiclass.py",
"diff": "@@ -107,7 +107,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\n[ 0., 0., 4., ..., 15., 3., 0.]]),\n'action': array([6, 8, 5, ..., 2, 5, 9]),\n'reward': array([1., 1., 1., ..., 1., 1., 1.]),\n- 'position': array([0, 0, 0, ..., 0, 0, 0]),\n+ 'position': None,\n'pscore': array([0.82, 0.82, 0.82, ..., 0.82, 0.82, 0.82])\n}\n@@ -252,7 +252,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\ncontext=self.X_ev,\naction=action,\nreward=reward,\n- position=np.zeros(self.n_rounds_ev, dtype=int),\n+ position=None, # position effect is not considered in classification data\npscore=pi_b[np.arange(self.n_rounds_ev), action],\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -100,7 +100,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\n[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]),\n'action': array([7, 4, 0, ..., 7, 9, 6]),\n- 'position': array([0, 0, 0, ..., 0, 0, 0]),\n+ 'position': None,\n'reward': array([0, 1, 1, ..., 0, 1, 0]),\n'expected_reward': array([[0.80210203, 0.73828559, 0.83199558, ..., 0.81190503, 0.70617705,\n0.68985306],\n@@ -308,7 +308,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\ncontext=context,\naction_context=self.action_context,\naction=action,\n- position=np.zeros(n_rounds, dtype=int),\n+ position=None, # position effect is not considered in synthetic data\nreward=reward,\nexpected_reward=expected_reward_,\npscore=pscore,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -68,7 +68,7 @@ class ReplayMethod(BaseOffPolicyEstimator):\nreward: np.ndarray,\naction: np.ndarray,\naction_dist: np.ndarray,\n- position: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -84,7 +84,7 @@ class ReplayMethod(BaseOffPolicyEstimator):\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (must be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- position: array-like, shape (n_rounds,)\n+ position: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\nReturns\n@@ -93,6 +93,8 @@ class ReplayMethod(BaseOffPolicyEstimator):\nRewards estimated by the Replay Method for each round.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\naction_match = np.array(\naction_dist[np.arange(action.shape[0]), action, position] == 1\n)\n@@ -257,9 +259,9 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -272,21 +274,23 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\nRewards estimated by IPW for each round.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\niw = action_dist[np.arange(action.shape[0]), action, position] / pscore\nreturn reward * iw\n@@ -471,9 +475,9 @@ class SelfNormalizedInverseProbabilityWeighting(InverseProbabilityWeighting):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -486,21 +490,23 @@ class SelfNormalizedInverseProbabilityWeighting(InverseProbabilityWeighting):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape 
(n_rounds,)\nRewards estimated by the SNIPW estimator for each round.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\niw = action_dist[np.arange(action.shape[0]), action, position] / pscore\nreturn reward * iw / iw.mean()\n@@ -551,30 +557,32 @@ class DirectMethod(BaseOffPolicyEstimator):\ndef _estimate_round_rewards(\nself,\n- position: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> float:\n\"\"\"Estimate policy value of an evaluation policy.\nParameters\n----------\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\nRewards estimated by the DM estimator for each round.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nn_rounds = position.shape[0]\nq_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n@@ -743,10 +751,10 @@ class DoublyRobust(BaseOffPolicyEstimator):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -759,9 +767,6 @@ class DoublyRobust(BaseOffPolicyEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -771,12 +776,17 @@ class DoublyRobust(BaseOffPolicyEstimator):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\nRewards estimated by the DR estimator for each round.\n\"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\nn_rounds = action.shape[0]\niw = action_dist[np.arange(n_rounds), action, position] / pscore\nq_hat_at_position = estimated_rewards_by_reg_model[\n@@ -990,10 +1000,10 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -1006,9 +1016,6 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\naction: array-like, 
shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -1018,6 +1025,9 @@ class SelfNormalizedDoublyRobust(DoublyRobust):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\n@@ -1101,10 +1111,10 @@ class SwitchDoublyRobust(DoublyRobust):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> float:\n\"\"\"Estimate rewards for each round.\n@@ -1117,9 +1127,6 @@ class SwitchDoublyRobust(DoublyRobust):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -1129,6 +1136,9 @@ class SwitchDoublyRobust(DoublyRobust):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\n@@ -1224,10 +1234,10 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nself,\nreward: np.ndarray,\naction: np.ndarray,\n- position: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\n@@ -1240,9 +1250,6 @@ class DoublyRobustWithShrinkage(DoublyRobust):\naction: array-like, shape (n_rounds,)\nAction sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- position: array-like, shape (n_rounds,)\n- Positions of each round in the given logged bandit feedback.\n-\npscore: array-like, shape (n_rounds,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -1252,6 +1259,9 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\nReturns\n----------\nestimated_rewards: array-like, shape (n_rounds,)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "@@ -46,6 +46,10 @@ def run_bandit_simulation(\npolicy_ = policy\nselected_actions_list = list()\ndim_context = bandit_feedback[\"context\"].shape[1]\n+ if bandit_feedback[\"position\"] is None:\n+ bandit_feedback[\"position\"] = np.zeros_like(\n+ bandit_feedback[\"action\"], dtype=int\n+ )\nfor action_, reward_, position_, context_ in tqdm(\nzip(\nbandit_feedback[\"action\"],\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -137,10 +137,7 @@ def test_synthetic_obtain_batch_bandit_feedback():\nbandit_feedback[\"action\"].ndim == 1\nand len(bandit_feedback[\"action\"]) == n_rounds\n)\n- assert (\n- bandit_feedback[\"position\"].ndim == 1\n- and len(bandit_feedback[\"position\"]) == n_rounds\n- )\n+ assert bandit_feedback[\"position\"] is None\nassert (\nbandit_feedback[\"reward\"].ndim == 1\nand len(bandit_feedback[\"reward\"]) == n_rounds\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -92,9 +92,7 @@ def expected_reward_0() -> np.ndarray:\[email protected](scope=\"session\")\ndef random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\nn_actions = synthetic_bandit_feedback[\"n_actions\"]\n- evaluation_policy = Random(\n- n_actions=n_actions, len_list=synthetic_bandit_feedback[\"position\"].ndim\n- )\n+ evaluation_policy = Random(n_actions=n_actions, len_list=1)\naction_dist = evaluation_policy.compute_batch_action_dist(\nn_rounds=synthetic_bandit_feedback[\"n_rounds\"]\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_regression_models.py",
"new_path": "tests/ope/test_regression_models.py",
"diff": "@@ -833,7 +833,6 @@ def test_performance_of_binary_outcome_models(\nfor model_name, model in binary_model_dict.items():\nregression_model = RegressionModel(\nn_actions=bandit_feedback[\"n_actions\"],\n- len_list=int(bandit_feedback[\"position\"].max() + 1),\naction_context=bandit_feedback[\"action_context\"],\nbase_model=model(**hyperparams[model_name]),\nfitting_method=fit_method,\n@@ -854,7 +853,6 @@ def test_performance_of_binary_outcome_models(\naction=bandit_feedback[\"action\"],\nreward=bandit_feedback[\"reward\"],\npscore=bandit_feedback[\"pscore\"],\n- position=bandit_feedback[\"position\"],\naction_dist=action_dist,\nn_folds=3, # 3-fold cross-fitting\nrandom_state=random_state,\n@@ -864,7 +862,7 @@ def test_performance_of_binary_outcome_models(\ny_score=estimated_rewards_by_reg_model[\nnp.arange(bandit_feedback[\"reward\"].shape[0]),\nbandit_feedback[\"action\"],\n- bandit_feedback[\"position\"],\n+ np.zeros_like(bandit_feedback[\"action\"], dtype=int),\n],\n)\n# compare dr criteria\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | set None to position array of synthetic and classification data |
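With this commit, `position` may be `None` in synthetic and classification data, and each estimator substitutes an all-zero position vector before indexing `action_dist`. The standalone sketch below applies that convention to the plain IPW round-reward formula shown in the diff; all inputs are synthetic and the function name is illustrative.

```python
import numpy as np

# IPW per-round rewards with the new position-defaulting convention.
def ipw_round_rewards(reward, action, pscore, action_dist, position=None):
    if position is None:
        # position effect is ignored when positions are not given (single-slot setting)
        position = np.zeros(action_dist.shape[0], dtype=int)
    iw = action_dist[np.arange(action.shape[0]), action, position] / pscore
    return reward * iw

rng = np.random.RandomState(0)
n_rounds, n_actions = 6, 3
action = rng.randint(n_actions, size=n_rounds)
reward = rng.binomial(1, 0.5, size=n_rounds)
pscore = np.full(n_rounds, 1 / n_actions)
action_dist = np.full((n_rounds, n_actions, 1), 1 / n_actions)  # uniform evaluation policy
print(ipw_round_rewards(reward, action, pscore, action_dist).mean())
```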
641,003 | 21.02.2021 17:45:52 | -32,400 | 49f389710b4b0da259bef666def0e5a1cd153410 | remove reward_test and add n_rounds | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/real.py",
"new_path": "obp/dataset/real.py",
"diff": "@@ -149,13 +149,16 @@ class OpenBanditDataset(BaseRealBanditDataset):\nThis parameter is used as a ground-truth policy value in the evaluation of OPE estimators.\n\"\"\"\n- return (\n- cls(behavior_policy=behavior_policy, campaign=campaign, data_path=data_path)\n- .obtain_batch_bandit_feedback(\n+ bandit_feedback = cls(\n+ behavior_policy=behavior_policy, campaign=campaign, data_path=data_path\n+ ).obtain_batch_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split\n- )[\"reward_test\"]\n- .mean()\n)\n+ if is_timeseries_split:\n+ bandit_feedback_test = bandit_feedback[1]\n+ else:\n+ bandit_feedback_test = bandit_feedback\n+ return bandit_feedback_test[\"reward\"].mean()\ndef load_raw_data(self) -> None:\n\"\"\"Load raw open bandit dataset.\"\"\"\n@@ -238,6 +241,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\naction_context=self.action_context,\n)\nbandit_feedback_test = dict(\n+ n_rounds=n_rounds_train,\nn_actions=self.n_actions,\naction=self.action[n_rounds_train:],\nposition=self.position[n_rounds_train:],\n@@ -254,7 +258,6 @@ class OpenBanditDataset(BaseRealBanditDataset):\naction=self.action,\nposition=self.position,\nreward=self.reward,\n- reward_test=self.reward,\npscore=self.pscore,\ncontext=self.context,\naction_context=self.action_context,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_real.py",
"new_path": "tests/dataset/test_real.py",
"diff": "@@ -72,7 +72,7 @@ def test_obtain_batch_bandit_feedback():\nbandit_feedback_train = bandit_feedback_timeseries[0]\nbandit_feedback_test = bandit_feedback_timeseries[1]\n- bf_train_elems = {\n+ bf_elems = {\n\"n_rounds\",\n\"n_actions\",\n\"action\",\n@@ -82,9 +82,8 @@ def test_obtain_batch_bandit_feedback():\n\"context\",\n\"action_context\",\n}\n- bf_test_elems = bf_train_elems - {\"n_rounds\"}\n- assert all(k in bandit_feedback_train.keys() for k in bf_train_elems)\n- assert all(k in bandit_feedback_test.keys() for k in bf_test_elems)\n+ assert all(k in bandit_feedback_train.keys() for k in bf_elems)\n+ assert all(k in bandit_feedback_test.keys() for k in bf_elems)\ndef test_calc_on_policy_policy_value_estimate():\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | remove reward_test and add n_rounds |
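`calc_on_policy_policy_value_estimate` now picks the test split out of the returned tuple before averaging rewards, instead of reading a `reward_test` field. A hedged sketch with a stand-in loader (not the real `OpenBanditDataset` method):

```python
import numpy as np

# Stand-in for obtain_batch_bandit_feedback: returns one dict, or a (train, test) tuple.
def obtain_batch_bandit_feedback(is_timeseries_split: bool = False):
    rewards = np.array([0, 1, 0, 1, 1, 0, 1, 0, 0, 1])
    if is_timeseries_split:
        return {"reward": rewards[:7]}, {"reward": rewards[7:]}
    return {"reward": rewards}

def calc_on_policy_value(is_timeseries_split: bool) -> float:
    bandit_feedback = obtain_batch_bandit_feedback(is_timeseries_split)
    # take the test element of the tuple when the data is split by time
    bandit_feedback_test = bandit_feedback[1] if is_timeseries_split else bandit_feedback
    return bandit_feedback_test["reward"].mean()

print(calc_on_policy_value(False), calc_on_policy_value(True))
```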
641,003 | 21.02.2021 18:04:05 | -32,400 | 027d944c2c1d145101909e66ced9109276cecd50 | fix sample_bootstrap_bandit_feedback | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/real.py",
"new_path": "obp/dataset/real.py",
"diff": "@@ -299,6 +299,11 @@ class OpenBanditDataset(BaseRealBanditDataset):\n- action_context: item-related context vectors\n\"\"\"\n+ if is_timeseries_split:\n+ bandit_feedback = self.obtain_batch_bandit_feedback(\n+ test_size=test_size, is_timeseries_split=is_timeseries_split\n+ )[0]\n+ else:\nbandit_feedback = self.obtain_batch_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_real.py",
"new_path": "tests/dataset/test_real.py",
"diff": "@@ -3,6 +3,7 @@ import numpy as np\nimport pandas as pd\nfrom typing import Tuple\n+from typing import Dict\nfrom obp.dataset import OpenBanditDataset\n@@ -98,8 +99,15 @@ def test_sample_bootstrap_bandit_feedback():\nbandit_feedback = dataset.obtain_batch_bandit_feedback()\nbootstrap_bf = dataset.sample_bootstrap_bandit_feedback()\n- assert len(bandit_feedback[\"action\"]) == len(bootstrap_bf[\"action\"])\n- assert len(bandit_feedback[\"position\"]) == len(bootstrap_bf[\"position\"])\n- assert len(bandit_feedback[\"reward\"]) == len(bootstrap_bf[\"reward\"])\n- assert len(bandit_feedback[\"pscore\"]) == len(bootstrap_bf[\"pscore\"])\n- assert len(bandit_feedback[\"context\"]) == len(bootstrap_bf[\"context\"])\n+ bf_keys = {\"action\", \"position\", \"reward\", \"pscore\", \"context\"}\n+ for k in bf_keys:\n+ assert len(bandit_feedback[k]) == len(bootstrap_bf[k])\n+\n+ bandit_feedback_timeseries: Dict = dataset.obtain_batch_bandit_feedback(\n+ is_timeseries_split=True\n+ )[0]\n+ bootstrap_bf_timeseries = dataset.sample_bootstrap_bandit_feedback(\n+ is_timeseries_split=True\n+ )\n+ for k in bf_keys:\n+ assert len(bandit_feedback_timeseries[k]) == len(bootstrap_bf_timeseries[k])\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix sample_bootstrap_bandit_feedback |
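`sample_bootstrap_bandit_feedback` resamples rounds with replacement and re-indexes every per-round field consistently; the fix above simply takes the train element of the tuple first when `is_timeseries_split=True`. A minimal resampling sketch over a plain dict (the field names mirror the test, the helper name is hypothetical):

```python
import numpy as np

# Bootstrap resampling of a bandit feedback dict: same number of rounds, drawn with replacement.
def sample_bootstrap(bandit_feedback: dict, random_state: int = 12345) -> dict:
    n_rounds = bandit_feedback["reward"].shape[0]
    idx = np.random.RandomState(random_state).choice(n_rounds, size=n_rounds, replace=True)
    resampled = dict(bandit_feedback)  # shallow copy; only per-round fields are replaced
    for k in ["action", "position", "reward", "pscore", "context"]:
        resampled[k] = bandit_feedback[k][idx]
    return resampled

n = 8
feedback = dict(
    action=np.arange(n) % 4,
    position=np.zeros(n, dtype=int),
    reward=np.random.RandomState(0).binomial(1, 0.5, size=n),
    pscore=np.full(n, 0.25),
    context=np.random.RandomState(1).normal(size=(n, 3)),
)
boot = sample_bootstrap(feedback)
assert all(len(boot[k]) == len(feedback[k]) for k in ["action", "position", "reward", "pscore", "context"])
```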
641,014 | 22.02.2021 15:13:13 | -32,400 | b638601f2da125ab792ec03e99b9540e656a3e5a | fix reg model to allow position=None | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/regression_model.py",
"new_path": "obp/ope/regression_model.py",
"diff": "@@ -135,13 +135,9 @@ class RegressionModel(BaseEstimator):\n)\nn_rounds = context.shape[0]\n- if self.len_list == 1:\n+ if position is None or self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- if not (isinstance(position, np.ndarray) and position.ndim == 1):\n- raise ValueError(\n- \"when len_list > 1, position must be a 1-dimensional ndarray\"\n- )\nif position.max() >= self.len_list:\nraise ValueError(\nf\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n@@ -307,13 +303,9 @@ class RegressionModel(BaseEstimator):\nf\"random_state must be an integer, but {random_state} is given\"\n)\n- if self.len_list == 1:\n+ if position is None or self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- if not (isinstance(position, np.ndarray) and position.ndim == 1):\n- raise ValueError(\n- \"when len_list > 1, position must be a 1-dimensional ndarray\"\n- )\nif position.max() >= self.len_list:\nraise ValueError(\nf\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_regression_models.py",
"new_path": "tests/ope/test_regression_models.py",
"diff": "@@ -454,7 +454,7 @@ invalid_input_of_fitting_regression_models = [\nnp.arange(n_rounds) % n_actions,\nnp.random.uniform(size=n_rounds),\nnp.ones(n_rounds) * 2,\n- None, #\n+ np.ones((n_rounds, 2)), #\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\nlen_list,\n@@ -463,7 +463,7 @@ invalid_input_of_fitting_regression_models = [\nNone,\n3,\n1,\n- \"when len_list > 1, position must be a 1-dimensional ndarray\",\n+ \"position must be 1-dimensional\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n@@ -650,16 +650,16 @@ valid_input_of_regression_models = [\nnp.arange(n_rounds) % n_actions,\nnp.random.uniform(size=n_rounds),\nNone,\n- np.random.choice(len_list, size=n_rounds),\n+ None,\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\n- len_list,\n+ 1,\n\"normal\",\nRidge(**hyperparams[\"ridge\"]),\nNone,\n1,\n1,\n- \"valid input without pscore and action_dist\",\n+ \"valid input without pscore, position, and action_dist\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix reg model to allow position=None |
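The relaxed check means `position` can simply be left out (or passed as `None`) whenever `len_list == 1`. A short sketch of the now-valid call with synthetic arrays, assuming obp is installed and the `RegressionModel` interface matches the signatures visible in these diffs (default `len_list=1`):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from obp.ope import RegressionModel

n_rounds, n_actions, dim_context = 500, 5, 4
context = np.random.normal(size=(n_rounds, dim_context))
action = np.random.randint(n_actions, size=n_rounds)
reward = np.random.binomial(1, 0.5, size=n_rounds)

reg_model = RegressionModel(
    n_actions=n_actions,
    base_model=LogisticRegression(max_iter=1000),
)
# position is None; with len_list=1 it is treated as all-zero positions internally
estimated_rewards = reg_model.fit_predict(
    context=context,
    action=action,
    reward=reward,
    position=None,
    n_folds=2,
    random_state=12345,
)
print(estimated_rewards.shape)  # (n_rounds, n_actions, 1)
```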
641,014 | 22.02.2021 19:57:50 | -32,400 | 5c122f756140121cb9302d45eba24f9fa8a1a38e | adjust benchmark/ to recent changes | [
{
"change_type": "MODIFY",
"old_path": "benchmark/README.md",
"new_path": "benchmark/README.md",
"diff": "---\nThis directory includes some benchmark experiments and demonstrations about off-policy evaluation using [the full size Open Bandit Dataset](https://research.zozo.com/data.html). The detailed description, results, and discussions can be found in [the relevant paper](https://arxiv.org/abs/2008.07146).\n-- `cf_policy_search`: counterfactual policy search using OPE\n-- `ope`: estimation performance comparisons on a variety of OPE estimators\n+- [`cf_policy_search`](./cf_policy_search): counterfactual policy search using OPE\n+- [`ope`](./ope): estimation performance comparisons on a variety of OPE estimators\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/README.md",
"new_path": "benchmark/ope/README.md",
"diff": "@@ -49,33 +49,6 @@ python train_regression_model.py\\\n--is_timeseries_split False\n```\n-<!--\n-```\n-for model in random_forest\n-do\n- for pi_b in bts\n- do\n- for camp in all\n- do\n- for is_mrdr in True False\n- do\n- for is_timeseries in True False\n- do\n- python train_regression_model.py\\\n- --n_runs 30\\\n- --base_model $model\\\n- --behavior_policy $pi_b\\\n- --campaign $camp\\\n- --is_mrdr $is_mrdr\\\n- --n_jobs 1\\\n- --is_timeseries_split $is_timeseries\n- done\n- done\n- done\n- done\n-done\n-``` -->\n-\n## Evaluating Off-Policy Estimators\n@@ -133,27 +106,3 @@ python benchmark_off_policy_estimators.py\\\n```\nThe results of our benchmark experiments can be found in Section 5 of [our paper](https://arxiv.org/abs/2008.07146).\n-\n-<!--\n-```\n-for model in logistic_regression\n-do\n- for pi_b in random\n- do\n- for camp in women all\n- do\n- for is_timeseries in True False\n- do\n- python benchmark_off_policy_estimators.py\\\n- --n_runs 30\\\n- --base_model $model\\\n- --behavior_policy $pi_b\\\n- --campaign $camp\\\n- --n_jobs 10\\\n- --is_timeseries_split $is_timeseries\n- done\n- done\n- done\n-done\n-```\n--->\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/benchmark_off_policy_estimators.py",
"new_path": "benchmark/ope/benchmark_off_policy_estimators.py",
"diff": "@@ -27,18 +27,18 @@ ope_estimators = [\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(tau=5, estimator_name=\"switch-dr (tau=5)\"),\n- SwitchDoublyRobust(tau=10, estimator_name=\"switch-dr (tau=10)\"),\n- SwitchDoublyRobust(tau=50, estimator_name=\"switch-dr (tau=50)\"),\n- SwitchDoublyRobust(tau=100, estimator_name=\"switch-dr (tau=100)\"),\n- SwitchDoublyRobust(tau=500, estimator_name=\"switch-dr (tau=500)\"),\n- SwitchDoublyRobust(tau=1000, estimator_name=\"switch-dr (tau=1000)\"),\n- DoublyRobustWithShrinkage(lambda_=5, estimator_name=\"dr-os (lambda=5)\"),\n- DoublyRobustWithShrinkage(lambda_=10, estimator_name=\"dr-os (lambda=10)\"),\n- DoublyRobustWithShrinkage(lambda_=50, estimator_name=\"dr-os (lambda=50)\"),\n- DoublyRobustWithShrinkage(lambda_=100, estimator_name=\"dr-os (lambda=100)\"),\n- DoublyRobustWithShrinkage(lambda_=500, estimator_name=\"dr-os (lambda=500)\"),\n- DoublyRobustWithShrinkage(lambda_=1000, estimator_name=\"dr-os (lambda=1000)\"),\n+ SwitchDoublyRobust(tau=5.0, estimator_name=\"switch-dr (tau=5)\"),\n+ SwitchDoublyRobust(tau=10.0, estimator_name=\"switch-dr (tau=10)\"),\n+ SwitchDoublyRobust(tau=50.0, estimator_name=\"switch-dr (tau=50)\"),\n+ SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ SwitchDoublyRobust(tau=500.0, estimator_name=\"switch-dr (tau=500)\"),\n+ SwitchDoublyRobust(tau=1000.0, estimator_name=\"switch-dr (tau=1000)\"),\n+ DoublyRobustWithShrinkage(lambda_=5.0, estimator_name=\"dr-os (lambda=5)\"),\n+ DoublyRobustWithShrinkage(lambda_=10.0, estimator_name=\"dr-os (lambda=10)\"),\n+ DoublyRobustWithShrinkage(lambda_=50.0, estimator_name=\"dr-os (lambda=50)\"),\n+ DoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n+ DoublyRobustWithShrinkage(lambda_=500.0, estimator_name=\"dr-os (lambda=500)\"),\n+ DoublyRobustWithShrinkage(lambda_=1000.0, estimator_name=\"dr-os (lambda=1000)\"),\n]\nif __name__ == \"__main__\":\n@@ -161,11 +161,19 @@ if __name__ == \"__main__\":\nwith open(reg_model_path / f\"is_for_reg_model_{b}.pkl\", \"rb\") as f:\nis_for_reg_model = pickle.load(f)\n# sample bootstrap samples from batch logged bandit feedback\n+ if is_timeseries_split:\nbandit_feedback = obd.sample_bootstrap_bandit_feedback(\ntest_size=test_size,\nis_timeseries_split=is_timeseries_split,\nrandom_state=b,\n)\n+ else:\n+ bandit_feedback = obd.sample_bootstrap_bandit_feedback(\n+ test_size=test_size,\n+ is_timeseries_split=is_timeseries_split,\n+ random_state=b,\n+ )\n+ bandit_feedback[\"n_rounds\"] = (~is_for_reg_model).sum()\nfor key_ in [\"context\", \"action\", \"reward\", \"pscore\", \"position\"]:\nbandit_feedback[key_] = bandit_feedback[key_][~is_for_reg_model]\n# estimate the mean reward function using the pre-trained reg_model\n@@ -197,7 +205,6 @@ if __name__ == \"__main__\":\nreturn relative_ee_b\nprocessed = Parallel(\n- backend=\"multiprocessing\",\nn_jobs=n_jobs,\nverbose=50,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/train_regression_model.py",
"new_path": "benchmark/ope/train_regression_model.py",
"diff": "import argparse\n+from copy import deepcopy\nimport yaml\nimport pickle\nfrom distutils.util import strtobool\n@@ -8,7 +9,7 @@ from typing import Dict\nimport numpy as np\nfrom pandas import DataFrame\nfrom joblib import Parallel, delayed\n-from sklearn.experimental import enable_hist_gradient_boosting\n+from sklearn.experimental import enable_hist_gradient_boosting # noqa\nfrom sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import log_loss, roc_auc_score\n@@ -39,25 +40,15 @@ def relative_ce(y_true: np.ndarray, y_pred: np.ndarray) -> float:\ndef evaluate_reg_model(\nbandit_feedback: BanditFeedback,\n- is_timeseries_split: bool,\nestimated_rewards_by_reg_model: np.ndarray,\n- is_for_reg_model: bool,\n) -> Dict[str, float]:\n\"\"\"Evaluate the estimation performance of regression model by AUC and RCE.\"\"\"\nperformance_reg_model = dict(auc=0.0, rce=0.0)\n- if is_timeseries_split:\n- factual_rewards = bandit_feedback[\"reward_test\"]\n+ factual_rewards = bandit_feedback[\"reward\"]\nestimated_factual_rewards = estimated_rewards_by_reg_model[\nnp.arange(factual_rewards.shape[0]),\n- bandit_feedback[\"action_test\"].astype(int),\n- bandit_feedback[\"position_test\"].astype(int),\n- ]\n- else:\n- factual_rewards = bandit_feedback[\"reward\"][~is_for_reg_model]\n- estimated_factual_rewards = estimated_rewards_by_reg_model[\n- np.arange((~is_for_reg_model).sum()),\n- bandit_feedback[\"action\"][~is_for_reg_model].astype(int),\n- bandit_feedback[\"position\"][~is_for_reg_model].astype(int),\n+ bandit_feedback[\"action\"].astype(int),\n+ bandit_feedback[\"position\"].astype(int),\n]\nperformance_reg_model[\"auc\"] = roc_auc_score(\ny_true=factual_rewards, y_score=estimated_factual_rewards\n@@ -139,6 +130,7 @@ if __name__ == \"__main__\":\ntest_size = args.test_size\nis_timeseries_split = args.is_timeseries_split\nis_mrdr = args.is_mrdr\n+ fitting_method = \"mrdr\" if is_mrdr else \"normal\"\nn_sim_to_compute_action_dist = args.n_sim_to_compute_action_dist\nn_jobs = args.n_jobs\nrandom_state = args.random_state\n@@ -159,7 +151,6 @@ if __name__ == \"__main__\":\n)\n# action distribution by evaluation policy\n# (more robust doubly robust needs evaluation policy information)\n- if is_mrdr:\nif behavior_policy == \"random\":\npolicy = BernoulliTS(\nn_actions=obd.n_actions,\n@@ -178,86 +169,75 @@ if __name__ == \"__main__\":\nn_sim=n_sim_to_compute_action_dist\n)\n- def process(b: int):\n+ def process(b: int) -> Dict[str, float]:\n# sample bootstrap from batch logged bandit feedback\n- bandit_feedback = obd.sample_bootstrap_bandit_feedback(\n+ if is_timeseries_split:\n+ bandit_feedback_train = obd.sample_bootstrap_bandit_feedback(\n+ test_size=test_size,\n+ is_timeseries_split=True,\n+ random_state=b,\n+ )\n+ bandit_feedback_test = obd.obtain_batch_bandit_feedback(\ntest_size=test_size,\n- is_timeseries_split=is_timeseries_split,\n+ is_timeseries_split=True,\n+ )[1]\n+ else:\n+ bandit_feedback_train = obd.sample_bootstrap_bandit_feedback(\nrandom_state=b,\n)\n+ bandit_feedback_test = deepcopy(bandit_feedback_train)\n# split data into two folds (data for training reg_model and for ope)\nis_for_reg_model = np.random.binomial(\n- n=1, p=0.3, size=bandit_feedback[\"n_rounds\"]\n+ n=1, p=0.3, size=bandit_feedback_train[\"n_rounds\"]\n).astype(bool)\nwith open(reg_model_path / f\"is_for_reg_model_{b}.pkl\", \"wb\") as f:\npickle.dump(\nis_for_reg_model,\nf,\n)\n- if is_mrdr:\n+ 
bandit_feedback_train[\"n_rounds\"] = is_for_reg_model.sum()\n+ bandit_feedback_test[\"n_rounds\"] = (~is_for_reg_model).sum()\n+ for key in [\"context\", \"action\", \"reward\", \"pscore\", \"position\"]:\n+ bandit_feedback_train[key] = bandit_feedback_train[key][\n+ is_for_reg_model\n+ ]\n+ bandit_feedback_test[key] = bandit_feedback_test[key][~is_for_reg_model]\n+ model_file_name = f\"reg_model_mrdr_{b}.pkl\" if is_mrdr else f\"reg_model_{b}.pkl\"\nreg_model = RegressionModel(\nn_actions=obd.n_actions,\nlen_list=obd.len_list,\n- action_context=bandit_feedback[\"action_context\"],\n+ action_context=bandit_feedback_train[\"action_context\"],\nbase_model=base_model_dict[base_model](**hyperparams[base_model]),\n- fitting_method=\"mrdr\",\n+ fitting_method=fitting_method,\n)\n# train regression model on logged bandit feedback data\nreg_model.fit(\n- context=bandit_feedback[\"context\"][is_for_reg_model],\n- action=bandit_feedback[\"action\"][is_for_reg_model],\n- reward=bandit_feedback[\"reward\"][is_for_reg_model],\n- pscore=bandit_feedback[\"pscore\"][is_for_reg_model],\n- position=bandit_feedback[\"position\"][is_for_reg_model],\n+ context=bandit_feedback_train[\"context\"],\n+ action=bandit_feedback_train[\"action\"],\n+ reward=bandit_feedback_train[\"reward\"],\n+ pscore=bandit_feedback_train[\"pscore\"],\n+ position=bandit_feedback_train[\"position\"],\naction_dist=np.tile(\n- action_dist_single_round, (is_for_reg_model.sum(), 1, 1)\n+ action_dist_single_round, (bandit_feedback_train[\"n_rounds\"], 1, 1)\n),\n)\n- with open(reg_model_path / f\"reg_model_mrdr_{b}.pkl\", \"wb\") as f:\n- pickle.dump(\n- reg_model,\n- f,\n- )\n- else:\n- reg_model = RegressionModel(\n- n_actions=obd.n_actions,\n- len_list=obd.len_list,\n- action_context=bandit_feedback[\"action_context\"],\n- base_model=base_model_dict[base_model](**hyperparams[base_model]),\n- fitting_method=\"normal\",\n- )\n- # train regression model on logged bandit feedback data\n- reg_model.fit(\n- context=bandit_feedback[\"context\"][is_for_reg_model],\n- action=bandit_feedback[\"action\"][is_for_reg_model],\n- reward=bandit_feedback[\"reward\"][is_for_reg_model],\n- position=bandit_feedback[\"position\"][is_for_reg_model],\n- )\n- with open(reg_model_path / f\"reg_model_{b}.pkl\", \"wb\") as f:\n+ with open(reg_model_path / model_file_name, \"wb\") as f:\npickle.dump(\nreg_model,\nf,\n)\n# evaluate the estimation performance of the regression model by AUC and RCE\n- if is_timeseries_split:\n- estimated_rewards_by_reg_model = reg_model.predict(\n- context=bandit_feedback[\"context_test\"],\n- )\n- else:\nestimated_rewards_by_reg_model = reg_model.predict(\n- context=bandit_feedback[\"context\"][~is_for_reg_model],\n+ context=bandit_feedback_test[\"context\"],\n)\nperformance_reg_model_b = evaluate_reg_model(\n- bandit_feedback=bandit_feedback,\n- is_timeseries_split=is_timeseries_split,\n+ bandit_feedback=bandit_feedback_test,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- is_for_reg_model=is_for_reg_model,\n)\nreturn performance_reg_model_b\nprocessed = Parallel(\n- backend=\"multiprocessing\",\nn_jobs=n_jobs,\nverbose=50,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n@@ -268,5 +248,5 @@ if __name__ == \"__main__\":\nfor metric, metric_value in performance_reg_model_b.items():\nperformance_reg_model[metric][b] = metric_value\nDataFrame(performance_reg_model).describe().T.round(6).to_csv(\n- log_path / f\"performance_reg_model.csv\"\n+ log_path / \"performance_reg_model.csv\"\n)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | adjust benchmark/ to recent changes |
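Much of this change is bookkeeping: after splitting a bootstrap sample into a regression-model fold and an OPE fold, `n_rounds` has to be updated to match the mask that filters the other fields. A numpy-only sketch of that split with made-up field sizes:

```python
import numpy as np

rng = np.random.RandomState(0)
n_rounds = 1000
bandit_feedback = {
    "n_rounds": n_rounds,
    "context": rng.normal(size=(n_rounds, 4)),
    "action": rng.randint(10, size=n_rounds),
    "reward": rng.binomial(1, 0.3, size=n_rounds),
    "pscore": rng.uniform(0.05, 1.0, size=n_rounds),
    "position": rng.randint(3, size=n_rounds),
}
# ~30% of rounds are held out for fitting the regression model,
# the rest are used for OPE; n_rounds must be updated to match the mask
is_for_reg_model = rng.binomial(n=1, p=0.3, size=n_rounds).astype(bool)
bandit_feedback["n_rounds"] = int((~is_for_reg_model).sum())
for key in ["context", "action", "reward", "pscore", "position"]:
    bandit_feedback[key] = bandit_feedback[key][~is_for_reg_model]
assert bandit_feedback["n_rounds"] == len(bandit_feedback["reward"])
```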
641,014 | 22.02.2021 19:58:12 | -32,400 | 98d2432123ab00002a4ce4fa39fc00214627d68b | fix n_rounds of bandit_feedback_test | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/real.py",
"new_path": "obp/dataset/real.py",
"diff": "@@ -241,7 +241,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\naction_context=self.action_context,\n)\nbandit_feedback_test = dict(\n- n_rounds=n_rounds_train,\n+ n_rounds=(self.n_rounds - n_rounds_train),\nn_actions=self.n_actions,\naction=self.action[n_rounds_train:],\nposition=self.position[n_rounds_train:],\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix n_rounds of bandit_feedback_test |
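The bug was reporting the training-fold size for the test fold. The corrected arithmetic in a standalone form (the proportional split below is illustrative, not copied from the dataset class):

```python
n_rounds, test_size = 10000, 0.3
n_rounds_train = int(n_rounds * (1.0 - test_size))
n_rounds_test = n_rounds - n_rounds_train  # the size the test fold should report
assert n_rounds_train + n_rounds_test == n_rounds
```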
641,014 | 22.02.2021 19:58:32 | -32,400 | baaf874686ac5aef4faf081510f32be3e4a2148b | rm unnecessary value error about random_state | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -148,11 +148,8 @@ class SyntheticBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\n)\n- if not isinstance(self.random_state, int):\n- raise ValueError(\n- f\"random_state must be an integer, but {self.random_state} is given\"\n- )\n-\n+ if self.random_state is None:\n+ raise ValueError(\"random_state must be given\")\nself.random_ = check_random_state(self.random_state)\nif self.reward_function is None:\nself.expected_reward = self.sample_contextfree_expected_reward()\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | rm unnecessary value error about random_state |
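`check_random_state` already accepts both integer seeds and `RandomState` instances, so the strict `isinstance(..., int)` check was unnecessarily narrow; only `None` needs to be rejected to keep the synthetic data reproducible. A small sketch of the relaxed validation (the helper name is mine):

```python
import numpy as np
from sklearn.utils import check_random_state

def resolve_random_state(random_state):
    # None is rejected so that generated data stays reproducible;
    # integer seeds and np.random.RandomState instances are both fine.
    if random_state is None:
        raise ValueError("random_state must be given")
    return check_random_state(random_state)

rng_from_int = resolve_random_state(12345)
rng_from_obj = resolve_random_state(np.random.RandomState(0))
print(type(rng_from_int), type(rng_from_obj))  # both are RandomState objects
```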
641,014 | 22.02.2021 19:59:05 | -32,400 | c53da163170e790d6abede6d0f22ab2839011f8b | change backend of Parallel | [
{
"change_type": "MODIFY",
"old_path": "examples/multiclass/evaluate_off_policy_estimators.py",
"new_path": "examples/multiclass/evaluate_off_policy_estimators.py",
"diff": "@@ -48,10 +48,10 @@ ope_estimators = [\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(tau=1., estimator_name=\"switch-dr (tau=1)\"),\n- SwitchDoublyRobust(tau=100., estimator_name=\"switch-dr (tau=100)\"),\n- DoublyRobustWithShrinkage(lambda_=1., estimator_name=\"dr-os (lambda=1)\"),\n- DoublyRobustWithShrinkage(lambda_=100., estimator_name=\"dr-os (lambda=100)\"),\n+ SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr (tau=1)\"),\n+ SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ DoublyRobustWithShrinkage(lambda_=1.0, estimator_name=\"dr-os (lambda=1)\"),\n+ DoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n]\nif __name__ == \"__main__\":\n@@ -187,7 +187,6 @@ if __name__ == \"__main__\":\nreturn relative_ee_i\nprocessed = Parallel(\n- backend=\"multiprocessing\",\nn_jobs=n_jobs,\nverbose=50,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/obd/evaluate_off_policy_estimators.py",
"new_path": "examples/obd/evaluate_off_policy_estimators.py",
"diff": "@@ -155,7 +155,6 @@ if __name__ == \"__main__\":\nreturn relative_ee_b\nprocessed = Parallel(\n- backend=\"multiprocessing\",\nn_jobs=n_jobs,\nverbose=50,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/online/README.md",
"new_path": "examples/online/README.md",
"diff": "@@ -57,7 +57,7 @@ python evaluate_off_policy_estimators.py\\\n# random_state=12345\n# ---------------------------------------------\n# mean std\n-# rm 0.202387 0.11685\n+# rm 0.097064 0.091453\n# =============================================\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/online/evaluate_off_policy_estimators.py",
"new_path": "examples/online/evaluate_off_policy_estimators.py",
"diff": "@@ -93,14 +93,6 @@ if __name__ == \"__main__\":\nrandom_state = args.random_state\nnp.random.seed(random_state)\n- # synthetic data generator with uniformly random policy\n- dataset = SyntheticBanditDataset(\n- n_actions=n_actions,\n- dim_context=dim_context,\n- reward_function=logistic_reward_function,\n- behavior_policy_function=None, # uniformly random\n- random_state=random_state,\n- )\n# define evaluation policy\nevaluation_policy_dict = dict(\nbernoulli_ts=BernoulliTS(n_actions=n_actions, random_state=random_state),\n@@ -125,6 +117,14 @@ if __name__ == \"__main__\":\nevaluation_policy = evaluation_policy_dict[evaluation_policy_name]\ndef process(i: int):\n+ # synthetic data generator with uniformly random policy\n+ dataset = SyntheticBanditDataset(\n+ n_actions=n_actions,\n+ dim_context=dim_context,\n+ reward_function=logistic_reward_function,\n+ behavior_policy_function=None, # uniformly random\n+ random_state=i,\n+ )\n# sample new data of synthetic logged bandit feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n# simulate the evaluation policy\n@@ -152,7 +152,6 @@ if __name__ == \"__main__\":\nreturn relative_ee_i\nprocessed = Parallel(\n- backend=\"multiprocessing\",\nn_jobs=n_jobs,\nverbose=50,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/synthetic/README.md",
"new_path": "examples/synthetic/README.md",
"diff": "@@ -64,15 +64,15 @@ python evaluate_off_policy_estimators.py\\\n# random_state=12345\n# ---------------------------------------------\n# mean std\n-# dm 0.180916 0.000650\n-# ipw 0.013690 0.008988\n-# snipw 0.014984 0.006156\n-# dr 0.007802 0.003867\n-# sndr 0.010062 0.002300\n-# switch-dr (tau=1) 0.180916 0.000650\n-# switch-dr (tau=100) 0.007802 0.003867\n-# dr-os (lambda=1) 0.180708 0.000646\n-# dr-os (lambda=100) 0.162749 0.000371\n+# dm 0.195878 0.012146\n+# ipw 0.019335 0.013199\n+# snipw 0.007543 0.005196\n+# dr 0.008099 0.006659\n+# sndr 0.008054 0.004911\n+# switch-dr (tau=1) 0.195878 0.012146\n+# switch-dr (tau=100) 0.008099 0.006659\n+# dr-os (lambda=1) 0.195642 0.012151\n+# dr-os (lambda=100) 0.175285 0.012801\n# =============================================\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/synthetic/evaluate_off_policy_estimators.py",
"new_path": "examples/synthetic/evaluate_off_policy_estimators.py",
"diff": "@@ -5,7 +5,7 @@ from pathlib import Path\nimport numpy as np\nfrom pandas import DataFrame\nfrom joblib import Parallel, delayed\n-from sklearn.experimental import enable_hist_gradient_boosting\n+from sklearn.experimental import enable_hist_gradient_boosting # nopa\nfrom sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\n@@ -45,10 +45,10 @@ ope_estimators = [\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(tau=1., estimator_name=\"switch-dr (tau=1)\"),\n- SwitchDoublyRobust(tau=100., estimator_name=\"switch-dr (tau=100)\"),\n- DoublyRobustWithShrinkage(lambda_=1., estimator_name=\"dr-os (lambda=1)\"),\n- DoublyRobustWithShrinkage(lambda_=100., estimator_name=\"dr-os (lambda=100)\"),\n+ SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr (tau=1)\"),\n+ SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ DoublyRobustWithShrinkage(lambda_=1.0, estimator_name=\"dr-os (lambda=1)\"),\n+ DoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n]\nif __name__ == \"__main__\":\n@@ -109,18 +109,16 @@ if __name__ == \"__main__\":\nbase_model_for_reg_model = args.base_model_for_reg_model\nn_jobs = args.n_jobs\nrandom_state = args.random_state\n- np.random.seed(random_state)\n+ def process(i: int):\n# synthetic data generator\ndataset = SyntheticBanditDataset(\nn_actions=n_actions,\ndim_context=dim_context,\nreward_function=logistic_reward_function,\nbehavior_policy_function=linear_behavior_policy,\n- random_state=random_state,\n+ random_state=i,\n)\n-\n- def process(i: int):\n# define evaluation policy using IPWLearner\nevaluation_policy = IPWLearner(\nn_actions=dataset.n_actions,\n@@ -174,7 +172,6 @@ if __name__ == \"__main__\":\nreturn relative_ee_i\nprocessed = Parallel(\n- backend=\"multiprocessing\",\nn_jobs=n_jobs,\nverbose=50,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | change backend of Parallel |
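Dropping `backend="multiprocessing"` lets joblib fall back to its default process-based `loky` backend. A minimal usage sketch of the resulting call pattern:

```python
from joblib import Parallel, delayed

def square(i: int) -> int:
    return i * i

# without an explicit backend argument, joblib uses its default "loky" backend
results = Parallel(n_jobs=2, verbose=0)(delayed(square)(i) for i in range(8))
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]
```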
641,007 | 23.02.2021 01:24:20 | -32,400 | bb9e1d348657d21cde84c3753736fdf4c250a195 | fix type hint at fit_predict & calc_ground_truth_policy_value method | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/multiclass.py",
"new_path": "obp/dataset/multiclass.py",
"diff": "@@ -301,7 +301,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\n)\nreturn np.expand_dims(pi_e, 2)\n- def calc_ground_truth_policy_value(self, action_dist: np.ndarray) -> np.ndarray:\n+ def calc_ground_truth_policy_value(self, action_dist: np.ndarray) -> float:\n\"\"\"Calculate the ground-truth policy value of a given action distribution.\nParameters\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/regression_model.py",
"new_path": "obp/ope/regression_model.py",
"diff": "@@ -235,7 +235,7 @@ class RegressionModel(BaseEstimator):\naction_dist: Optional[np.ndarray] = None,\nn_folds: int = 1,\nrandom_state: Optional[int] = None,\n- ) -> None:\n+ ) -> np.ndarray:\n\"\"\"Fit the regression model on given logged bandit feedback data and predict the reward function of the same data.\nNote\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix type hint at fit_predict & calc_ground_truth_policy_value method |
641,003 | 07.03.2021 17:17:32 | -32,400 | 4058abbed87fbf170b16950ac1077f9eb767f088 | add lints.yml | [
{
"change_type": "ADD",
"old_path": null,
"new_path": ".github/workflows/lints.yml",
"diff": "+name: Lints\n+\n+on:\n+ push:\n+ branches:\n+ - master\n+ pull_request: {}\n+\n+jobs:\n+ lints:\n+ runs-on: ubuntu-latest\n+\n+ steps:\n+ - name: Checkout\n+ uses: actions/checkout@v2\n+\n+ - name: Setup Python\n+ uses: actions/setup-python@v2\n+ with:\n+ python-version: 3.7\n+\n+ - name: Black\n+ uses: psf/black@stable\n+ with:\n+ args: \". --check --diff\"\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add lints.yml |
641,003 | 07.03.2021 17:18:43 | -32,400 | cb92ca6a547dceab6d8fe5b02ea748a1f0ec07d8 | add tests.yml | [
{
"change_type": "ADD",
"old_path": null,
"new_path": ".github/workflows/tests.yml",
"diff": "+name: Tests\n+\n+on:\n+ push:\n+ branches:\n+ - master\n+ pull_request: {}\n+\n+jobs:\n+ tests:\n+ runs-on: ubuntu-latest\n+\n+ strategy:\n+ matrix:\n+ python-version: [3.7, 3.8, 3.9]\n+\n+ # Not intended for forks.\n+ if: github.repository == 'nmasahiro/zr-obp'\n+\n+ steps:\n+ - name: Checkout\n+ uses: actions/checkout@v2\n+\n+ - name: Setup Python${{ matrix.python-version }}\n+ uses: actions/setup-python@v2\n+ with:\n+ python-version: ${{ matrix.python-version }}\n+\n+ - name: Install\n+ run: |\n+ python -m pip install --upgrade pip\n+ pip install --progress-bar off -U setuptools\n+\n+ # Install pytest\n+ pip install --progress-bar off .\n+\n+ pip install --progress-bar off pytest\n+\n+ - name: Tests\n+ run: |\n+ pytest tests\n\\ No newline at end of file\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests.yml |
641,003 | 07.03.2021 17:22:41 | -32,400 | 302353d66b9a80272806ea98263f677bbcf1006f | adapt link to upstream repo | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/tests.yml",
"new_path": ".github/workflows/tests.yml",
"diff": "@@ -15,7 +15,7 @@ jobs:\npython-version: [3.7, 3.8, 3.9]\n# Not intended for forks.\n- if: github.repository == 'nmasahiro/zr-obp'\n+ if: github.repository == 'st-tech/zr-obp'\nsteps:\n- name: Checkout\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | adapt link to upstream repo |
641,003 | 07.03.2021 17:34:11 | -32,400 | b24c6d513a34aae1bfeefee820325ebe4b212d09 | add description of CI to CONTRIBUTING.md | [
{
"change_type": "MODIFY",
"old_path": "CONTRIBUTING.md",
"new_path": "CONTRIBUTING.md",
"diff": "@@ -29,3 +29,7 @@ After installing flake8, you can check the coding style by the following command\n# perform checking of the coding style\n$ flake8 .\n```\n+\n+## Continuous Integration\n+\n+Open Bandit Pipeline uses Github Actions to perform continuous integration.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add description of CI to CONTRIBUTING.md |
641,006 | 07.03.2021 19:49:02 | -32,400 | 2e1cd0b2bdff685c2bbd0f285fd28a23f9ccd72c | fix hyperparameter validations of dr-variants | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -1098,10 +1098,12 @@ class SwitchDoublyRobust(DoublyRobust):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.tau, float):\n+ if not isinstance(self.tau, (float, int)):\nraise ValueError(\n- f\"switching hyperparameter must be float, but {self.tau} is given\"\n+ f\"switching hyperparameter must be float or integer, but {self.tau} is given\"\n)\n+ if self.tau != self.tau:\n+ raise ValueError(\"switching hyperparameter must not be nan\")\nif self.tau < 0.0:\nraise ValueError(\nf\"switching hyperparameter must be larger than or equal to zero, but {self.tau} is given\"\n@@ -1221,10 +1223,12 @@ class DoublyRobustWithShrinkage(DoublyRobust):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.lambda_, float):\n+ if not isinstance(self.lambda_, (float, int)):\nraise ValueError(\n- f\"shrinkage hyperparameter must be float, but {self.lambda_} is given\"\n+ f\"shrinkage hyperparameter must be float or integer, but {self.lambda_} is given\"\n)\n+ if self.lambda_ != self.lambda_:\n+ raise ValueError(\"shrinkage hyperparameter must not be nan\")\nif self.lambda_ < 0.0:\nraise ValueError(\nf\"shrinkage hyperparameter must be larger than or equal to zero, but {self.lambda_} is given\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -231,8 +231,9 @@ def test_dr_using_invalid_input_data(\n# switch-dr\ninvalid_input_of_switch = [\n- (\"a\", \"switching hyperparameter must be float\"),\n+ (\"a\", \"switching hyperparameter must be float or integer\"),\n(-1.0, \"switching hyperparameter must be larger than or equal to zero\"),\n+ (np.nan, \"switching hyperparameter must not be nan\"),\n]\n@@ -245,10 +246,25 @@ def test_switch_using_invalid_input_data(tau: float, description: str) -> None:\n_ = SwitchDoublyRobust(tau=tau)\n+valid_input_of_switch = [\n+ (3.0, \"float tau\"),\n+ (2, \"integer tau\"),\n+]\n+\n+\[email protected](\n+ \"tau, description\",\n+ valid_input_of_switch,\n+)\n+def test_switch_using_valid_input_data(tau: float, description: str) -> None:\n+ _ = SwitchDoublyRobust(tau=tau)\n+\n+\n# dr-os\ninvalid_input_of_shrinkage = [\n- (\"a\", \"shrinkage hyperparameter must be float\"),\n+ (\"a\", \"shrinkage hyperparameter must be float or integer\"),\n(-1.0, \"shrinkage hyperparameter must be larger than or equal to zero\"),\n+ (np.nan, \"shrinkage hyperparameter must not be nan\"),\n]\n@@ -261,6 +277,20 @@ def test_shrinkage_using_invalid_input_data(lambda_: float, description: str) ->\n_ = DoublyRobustWithShrinkage(lambda_=lambda_)\n+valid_input_of_shrinkage = [\n+ (3.0, \"float lambda_\"),\n+ (2, \"integer lambda_\"),\n+]\n+\n+\[email protected](\n+ \"lambda_, description\",\n+ valid_input_of_shrinkage,\n+)\n+def test_shrinkage_using_valid_input_data(lambda_: float, description: str) -> None:\n+ _ = DoublyRobustWithShrinkage(lambda_=lambda_)\n+\n+\n# dr variants\nvalid_input_of_dr_variants = [\n(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix hyperparameter validations of dr-variants |
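The new checks accept both ints and floats and use the `x != x` trick as a dependency-free NaN test (NaN is the only value not equal to itself). A standalone sketch of the `tau` validation (the function name is mine; the shrinkage estimator applies the same pattern to `lambda_`):

```python
import numpy as np

def validate_tau(tau) -> None:
    # ints and floats are both accepted after this change
    if not isinstance(tau, (float, int)):
        raise ValueError(f"switching hyperparameter must be float or integer, but {tau} is given")
    # nan is the only float that is not equal to itself
    if tau != tau:
        raise ValueError("switching hyperparameter must not be nan")
    if tau < 0.0:
        raise ValueError("switching hyperparameter must be larger than or equal to zero")

validate_tau(2)    # ok: integer
validate_tau(3.0)  # ok: float
try:
    validate_tau(np.nan)
except ValueError as e:
    print(e)       # switching hyperparameter must not be nan
```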
641,005 | 08.03.2021 06:38:42 | -32,400 | d655183d41149a9d65da7889fafcfab728ec217d | add mypy-extensions | [
{
"change_type": "MODIFY",
"old_path": "requirements.txt",
"new_path": "requirements.txt",
"diff": "matplotlib>=3.2.2\n+mypy-extensions>=0.4.3\nnumpy>=1.18.1\npandas>=0.25.1\npyyaml>=5.1\n"
},
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -26,6 +26,7 @@ setup(\nlong_description_content_type=\"text/markdown\",\ninstall_requires=[\n\"matplotlib>=3.2.2\",\n+ \"mypy-extensions>=0.4.3\",\n\"numpy>=1.18.1\",\n\"pandas>=0.25.1\",\n\"pyyaml>=5.1\",\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add mypy-extensions |
641,005 | 08.03.2021 21:00:07 | -32,400 | c4ee9ab91bb58a01ed9de99ba7efb822c983b0a5 | use predict_proa | [
{
"change_type": "MODIFY",
"old_path": "examples/opl/README.md",
"new_path": "examples/opl/README.md",
"diff": "@@ -81,8 +81,10 @@ python evaluate_off_policy_learners.py\\\n# =============================================\n# random_state=12345\n# ---------------------------------------------\n-# random ipw nn\n-# policy value 0.604339 0.767615 0.77251\n+# policy value\n+# random 0.604339\n+# ipw 0.767615\n+# nn 0.764302\n# =============================================\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/opl/evaluate_off_policy_learners.py",
"new_path": "examples/opl/evaluate_off_policy_learners.py",
"diff": "@@ -210,7 +210,7 @@ if __name__ == \"__main__\":\nipw_learner_action_dist = ipw_learner.predict(\ncontext=bandit_feedback_test[\"context\"],\n)\n- nn_policy_learner_action_dist = nn_policy_learner.predict(\n+ nn_policy_learner_action_dist = nn_policy_learner.predict_proba(\ncontext=bandit_feedback_test[\"context\"],\n)\n@@ -236,7 +236,7 @@ if __name__ == \"__main__\":\n],\ncolumns=[\"policy value\"],\nindex=[\"random\", \"ipw\", \"nn\"],\n- ).T.round(6)\n+ ).round(6)\nprint(\"=\" * 45)\nprint(f\"random_state={random_state}\")\nprint(\"-\" * 45)\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/opl.ipynb",
"new_path": "examples/quickstart/opl.ipynb",
"diff": "\" estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\\n\",\n\")\\n\",\n\"# obtains action choice probabilities for the test set of the synthetic logged bandit feedback\\n\",\n- \"action_dist_nn_dm = nn_dm.predict(context=bandit_feedback_test[\\\"context\\\"])\"\n+ \"action_dist_nn_dm = nn_dm.predict_proba(context=bandit_feedback_test[\\\"context\\\"])\"\n]\n},\n{\n\" pscore=bandit_feedback_train[\\\"pscore\\\"],\\n\",\n\")\\n\",\n\"# obtains action choice probabilities for the test set of the synthetic logged bandit feedback\\n\",\n- \"action_dist_nn_ipw = nn_ipw.predict(context=bandit_feedback_test[\\\"context\\\"])\"\n+ \"action_dist_nn_ipw = nn_ipw.predict_proba(context=bandit_feedback_test[\\\"context\\\"])\"\n]\n},\n{\n\" estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\\n\",\n\")\\n\",\n\"# obtains action choice probabilities for the test set of the synthetic logged bandit feedback\\n\",\n- \"action_dist_nn_dr = nn_dr.predict(context=bandit_feedback_test[\\\"context\\\"])\"\n+ \"action_dist_nn_dr = nn_dr.predict_proba(context=bandit_feedback_test[\\\"context\\\"])\"\n]\n},\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"policy value of NN Policy Learner with DM: 0.6786610995854737\\n\",\n- \"policy value of NN Policy Learner with IPW: 0.751523191255569\\n\",\n- \"policy value of NN Policy Learner with DR: 0.7719675528392871\\n\",\n+ \"policy value of NN Policy Learner with DM: 0.6785771195516228\\n\",\n+ \"policy value of NN Policy Learner with IPW: 0.7429362678096227\\n\",\n+ \"policy value of NN Policy Learner with DR: 0.7651217293062053\\n\",\n\"policy value of IPW Learner with Logistic Regression: 0.767614655337475\\n\",\n\"policy value of IPW Learner with Random Forest: 0.703809241480009\\n\",\n\"policy value of Unifrom Random: 0.6043385526445931\\n\"\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"In fact, NN Policy Learner with DR reveals the best performance among the 6 evaluation policies.\"\n+ \"In fact, IPW Learner with Logistic Regression is the best, and NN Policy Learner with DR is the second.\"\n]\n},\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -348,7 +348,11 @@ class IPWLearner(BaseOfflinePolicyLearner):\n@dataclass\nclass NNPolicyLearner(BaseOfflinePolicyLearner):\n- \"\"\"Off-policy learner using an neural network whose objective function is an off-policy estimator.\n+ \"\"\"Off-policy learner using an neural network whose objective function is an OPE estimator.\n+\n+ Note\n+ --------\n+ MLP is implemented in PyTorch.\nParameters\n-----------\n@@ -727,6 +731,16 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n) -> None:\n\"\"\"Fits an offline bandit policy using the given logged bandit feedback data.\n+ Note\n+ ----------\n+ Given the training data :math:`\\\\mathcal{D}`, this policy maximizes the following objective function:\n+\n+ .. math::\n+\n+ \\\\hat{V}(\\\\pi_\\\\theta; \\\\mathcal{D}) - \\\\lambda \\\\Omega(\\\\theta)\n+\n+ where :math:`\\\\hat{V}` is an OPE estimator and :math:`\\\\lambda \\\\Omega(\\\\theta)` is a regularization term.\n+\nParameters\n-----------\ncontext: array-like, shape (n_rounds, dim_context)\n@@ -960,7 +974,18 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nself,\ncontext: np.ndarray,\n) -> np.ndarray:\n- \"\"\"Obtains action choice probabilities for new data based on scores predicted by a classifier.\n+ \"\"\"Obtains action choice probabilities for new data.\n+\n+ Note\n+ --------\n+ This policy uses multi-layer perceptron (MLP) and the softmax function as the last layer.\n+ This is a stochastic policy and represented as follows:\n+\n+ .. math::\n+\n+ \\\\pi_\\\\theta (a \\\\mid x) = \\\\frac{\\\\exp(f_\\\\theta(x, a))}{\\\\sum_{a' \\\\in \\\\mathcal{A}} \\\\exp(f_\\\\theta(x, a'))}\n+\n+ where :math:`f__\\\\theta(x, a)` is MLP with parameter :math:`\\\\theta`.\nParameters\n----------------\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | use predict_proa |
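`predict_proba` returns the stochastic softmax policy described in the updated docstring: the MLP's scores f_theta(x, a) are turned into action-choice probabilities. A numpy sketch of that last layer with illustrative shapes (the scores here are random stand-ins for the network output):

```python
import numpy as np

def softmax(x: np.ndarray) -> np.ndarray:
    b = x - x.max(axis=1, keepdims=True)  # subtract row max for numerical stability
    exp_b = np.exp(b)
    return exp_b / exp_b.sum(axis=1, keepdims=True)

scores = np.random.normal(size=(5, 4))  # f_theta(x, a) for (n_rounds, n_actions)
pi = softmax(scores)                    # pi_theta(a | x), each row sums to 1
assert np.allclose(pi.sum(axis=1), 1.0)
action_dist = pi[:, :, np.newaxis]      # (n_rounds, n_actions, len_list=1)
```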
641,003 | 14.03.2021 14:34:16 | -32,400 | 9256813f7a184cf2a2db6969941378e9c5a73a2c | allow position is None | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -145,7 +145,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nif pscore is None:\nn_actions = np.int(action.max() + 1)\npscore = np.ones_like(action) / n_actions\n- if self.len_list == 1:\n+ if position is None or self.len_list == 1:\nposition = np.zeros_like(action, dtype=int)\nelse:\nif not isinstance(position, np.ndarray) or position.ndim != 1:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | allow position is None |
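With this change `position` can be omitted from `IPWLearner.fit` whenever `len_list == 1`. A short sketch with synthetic arrays, assuming the `IPWLearner` interface used elsewhere in these diffs:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from obp.policy import IPWLearner

n_rounds, n_actions = 500, 5
context = np.random.normal(size=(n_rounds, 4))
action = np.random.randint(n_actions, size=n_rounds)
reward = np.random.binomial(1, 0.5, size=n_rounds)
pscore = np.ones(n_rounds) / n_actions  # uniform logging policy

learner = IPWLearner(
    n_actions=n_actions,
    base_classifier=LogisticRegression(max_iter=1000),
)
# position is simply omitted; with len_list=1 it is filled with zeros internally
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
action_dist = learner.predict(context=context)
print(action_dist.shape)  # (n_rounds, n_actions, 1)
```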
641,003 | 14.03.2021 15:00:15 | -32,400 | f209a674623b7d32b0e98e85eabe39bce5098bb8 | test offline estimation performance | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/policy/test_offline_estimation_performance.py",
"diff": "+import numpy as np\n+from pandas import DataFrame\n+from joblib import Parallel, delayed\n+from sklearn.experimental import enable_hist_gradient_boosting # NOQA\n+from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\n+from sklearn.linear_model import LogisticRegression\n+import pytest\n+from dataclasses import dataclass\n+from obp.ope.estimators import BaseOffPolicyEstimator\n+\n+from typing import Dict\n+\n+from obp.dataset import (\n+ SyntheticBanditDataset,\n+ linear_behavior_policy,\n+ logistic_reward_function,\n+)\n+from obp.policy import IPWLearner\n+from obp.ope import (\n+ RegressionModel,\n+ OffPolicyEvaluation,\n+ InverseProbabilityWeighting,\n+ SelfNormalizedInverseProbabilityWeighting,\n+ DirectMethod,\n+ DoublyRobust,\n+ SelfNormalizedDoublyRobust,\n+ SwitchDoublyRobust,\n+ DoublyRobustWithShrinkage,\n+)\n+\n+\n+# hyperparameter for the regression model used in model dependent OPE estimators\n+hyperparams = {\n+ \"lightgbm\": {\n+ \"max_iter\": 500,\n+ \"learning_rate\": 0.005,\n+ \"max_depth\": 5,\n+ \"min_samples_leaf\": 10,\n+ \"random_state\": 12345,\n+ },\n+ \"logistic_regression\": {\n+ \"max_iter\": 10000,\n+ \"C\": 1000,\n+ \"random_state\": 12345,\n+ },\n+ \"random_forest\": {\n+ \"n_estimators\": 500,\n+ \"max_depth\": 5,\n+ \"min_samples_leaf\": 10,\n+ \"random_state\": 12345,\n+ },\n+}\n+\n+base_model_dict = dict(\n+ logistic_regression=LogisticRegression,\n+ lightgbm=HistGradientBoostingClassifier,\n+ random_forest=RandomForestClassifier,\n+)\n+\n+offline_experiment_configurations = [\n+ (\n+ 600,\n+ 10,\n+ 5,\n+ \"logistic_regression\",\n+ \"logistic_regression\",\n+ ),\n+ (\n+ 300,\n+ 3,\n+ 2,\n+ \"lightgbm\",\n+ \"lightgbm\",\n+ ),\n+ (\n+ 500,\n+ 5,\n+ 3,\n+ \"random_forest\",\n+ \"random_forest\",\n+ ),\n+ (\n+ 500,\n+ 3,\n+ 5,\n+ \"logistic_regression\",\n+ \"random_forest\",\n+ ),\n+ (\n+ 400,\n+ 10,\n+ 10,\n+ \"lightgbm\",\n+ \"logistic_regression\",\n+ ),\n+]\n+\n+\n+@dataclass\n+class RandomOffPolicyEstimator(BaseOffPolicyEstimator):\n+ \"\"\"Estimate the policy value based on random predictions\"\"\"\n+\n+ estimator_name: str = \"random\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ action_dist: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ n_rounds = action_dist.shape[0]\n+ return np.random.uniform(size=n_rounds)\n+\n+ def estimate_policy_value(\n+ self,\n+ action_dist: np.ndarray,\n+ **kwargs,\n+ ) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\"\"\"\n+ return self._estimate_round_rewards(action_dist=action_dist).mean()\n+\n+ def estimate_interval(self) -> Dict[str, float]:\n+ pass # not used in this test\n+\n+\n+# compared OPE estimators\n+ope_estimators = [\n+ RandomOffPolicyEstimator(),\n+ DirectMethod(),\n+ InverseProbabilityWeighting(),\n+ SelfNormalizedInverseProbabilityWeighting(),\n+ DoublyRobust(),\n+ SelfNormalizedDoublyRobust(),\n+ SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr (tau=1)\"),\n+ SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ DoublyRobustWithShrinkage(lambda_=1.0, estimator_name=\"dr-os (lambda=1)\"),\n+ DoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n+]\n+\n+\[email protected](\n+ \"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\n+ offline_experiment_configurations,\n+)\n+def test_offline_ipwlearner_performance(\n+ n_rounds: int,\n+ n_actions: int,\n+ dim_context: int,\n+ base_model_for_evaluation_policy: str,\n+ base_model_for_reg_model: str,\n+) 
-> None:\n+ def process(i: int):\n+ # synthetic data generator\n+ dataset = SyntheticBanditDataset(\n+ n_actions=n_actions,\n+ dim_context=dim_context,\n+ reward_function=logistic_reward_function,\n+ behavior_policy_function=linear_behavior_policy,\n+ random_state=i,\n+ )\n+ # define evaluation policy using IPWLearner\n+ evaluation_policy = IPWLearner(\n+ n_actions=dataset.n_actions,\n+ base_classifier=base_model_dict[base_model_for_evaluation_policy](\n+ **hyperparams[base_model_for_evaluation_policy]\n+ ),\n+ )\n+ # sample new training and test sets of synthetic logged bandit feedback\n+ bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ # train the evaluation policy on the training set of the synthetic logged bandit feedback\n+ evaluation_policy.fit(\n+ context=bandit_feedback_train[\"context\"],\n+ action=bandit_feedback_train[\"action\"],\n+ reward=bandit_feedback_train[\"reward\"],\n+ pscore=bandit_feedback_train[\"pscore\"],\n+ )\n+ # predict the action decisions for the test set of the synthetic logged bandit feedback\n+ action_dist = evaluation_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ # estimate the mean reward function of the test set of synthetic bandit feedback with ML model\n+ regression_model = RegressionModel(\n+ n_actions=dataset.n_actions,\n+ action_context=dataset.action_context,\n+ base_model=base_model_dict[base_model_for_reg_model](\n+ **hyperparams[base_model_for_reg_model]\n+ ),\n+ )\n+ estimated_rewards_by_reg_model = regression_model.fit_predict(\n+ context=bandit_feedback_test[\"context\"],\n+ action=bandit_feedback_test[\"action\"],\n+ reward=bandit_feedback_test[\"reward\"],\n+ n_folds=3, # 3-fold cross-fitting\n+ random_state=12345,\n+ )\n+ # evaluate estimators' performances using relative estimation error (relative-ee)\n+ ope = OffPolicyEvaluation(\n+ bandit_feedback=bandit_feedback_test,\n+ ope_estimators=ope_estimators,\n+ )\n+ relative_ee_i = ope.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=dataset.calc_ground_truth_policy_value(\n+ expected_reward=bandit_feedback_test[\"expected_reward\"],\n+ action_dist=action_dist,\n+ ),\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+ return relative_ee_i\n+\n+ n_runs = 10\n+ processed = Parallel(\n+ n_jobs=-1,\n+ verbose=0,\n+ )([delayed(process)(i) for i in np.arange(n_runs)])\n+ relative_ee_dict = {est.estimator_name: dict() for est in ope_estimators}\n+ for i, relative_ee_i in enumerate(processed):\n+ for (\n+ estimator_name,\n+ relative_ee_,\n+ ) in relative_ee_i.items():\n+ relative_ee_dict[estimator_name][i] = relative_ee_\n+ relative_ee_df = DataFrame(relative_ee_dict).describe().T.round(6)\n+ relative_ee_df_mean = relative_ee_df[\"mean\"]\n+\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dm\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"ipw\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"snipw\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"sndr\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (tau=1)\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (tau=100)\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr-os (lambda=1)\"]\n+ assert 
relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr-os (lambda=100)\"]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | test offline estimation performance |
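The new test ranks estimators by relative estimation error against a random-prediction baseline. A sketch of the metric itself, assuming the usual definition |V_hat - V| / |V|:

```python
import numpy as np

def relative_estimation_error(estimated_policy_value: float, ground_truth: float) -> float:
    # scale-free error, so estimators are comparable across synthetic settings
    return np.abs(estimated_policy_value - ground_truth) / np.abs(ground_truth)

ground_truth = 0.60
print(relative_estimation_error(0.57, ground_truth))  # ~0.05
print(relative_estimation_error(0.90, ground_truth))  # ~0.5
```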
641,003 | 14.03.2021 15:45:06 | -32,400 | 5250825ccfe2c056c660a1aaccf0b493efeb919a | add test offline learner performance | [
{
"change_type": "RENAME",
"old_path": "tests/policy/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -141,7 +141,7 @@ ope_estimators = [\n\"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\noffline_experiment_configurations,\n)\n-def test_offline_ipwlearner_performance(\n+def test_offline_estimation_performance(\nn_rounds: int,\nn_actions: int,\ndim_context: int,\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/policy/test_offline_learner_performance.py",
"diff": "+import numpy as np\n+from joblib import Parallel, delayed\n+from sklearn.experimental import enable_hist_gradient_boosting # NOQA\n+from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier\n+from sklearn.linear_model import LogisticRegression\n+import pytest\n+from dataclasses import dataclass\n+from obp.policy.base import BaseOfflinePolicyLearner\n+from sklearn.base import clone, ClassifierMixin, is_classifier\n+\n+from typing import Optional\n+from typing import Tuple\n+\n+from obp.dataset import (\n+ SyntheticBanditDataset,\n+ linear_behavior_policy,\n+ logistic_reward_function,\n+)\n+from obp.policy import IPWLearner\n+\n+\n+# hyperparameter for the regression model used in model dependent OPE estimators\n+hyperparams = {\n+ \"lightgbm\": {\n+ \"max_iter\": 500,\n+ \"learning_rate\": 0.005,\n+ \"max_depth\": 5,\n+ \"min_samples_leaf\": 10,\n+ \"random_state\": 12345,\n+ },\n+ \"logistic_regression\": {\n+ \"max_iter\": 10000,\n+ \"C\": 1000,\n+ \"random_state\": 12345,\n+ },\n+ \"random_forest\": {\n+ \"n_estimators\": 500,\n+ \"max_depth\": 5,\n+ \"min_samples_leaf\": 10,\n+ \"random_state\": 12345,\n+ },\n+}\n+\n+base_model_dict = dict(\n+ logistic_regression=LogisticRegression,\n+ lightgbm=HistGradientBoostingClassifier,\n+ random_forest=RandomForestClassifier,\n+)\n+\n+offline_experiment_configurations = [\n+ (\n+ 600,\n+ 10,\n+ 5,\n+ \"logistic_regression\",\n+ \"logistic_regression\",\n+ ),\n+ (\n+ 450,\n+ 3,\n+ 2,\n+ \"lightgbm\",\n+ \"lightgbm\",\n+ ),\n+ (\n+ 500,\n+ 5,\n+ 3,\n+ \"random_forest\",\n+ \"random_forest\",\n+ ),\n+ (\n+ 500,\n+ 3,\n+ 5,\n+ \"logistic_regression\",\n+ \"random_forest\",\n+ ),\n+ (\n+ 400,\n+ 10,\n+ 10,\n+ \"lightgbm\",\n+ \"logistic_regression\",\n+ ),\n+]\n+\n+\n+@dataclass\n+class RandomPolicy(BaseOfflinePolicyLearner):\n+\n+ def __post_init__(self) -> None:\n+ super().__post_init__()\n+\n+ def fit(self):\n+ raise NotImplementedError\n+\n+ def predict(self, context: np.ndarray) -> np.ndarray:\n+\n+ n_rounds = context.shape[0]\n+ action_dist = np.random.rand(n_rounds, self.n_actions, self.len_list)\n+ return action_dist\n+\n+\n+@dataclass\n+class UniformSampleWeightLearner(BaseOfflinePolicyLearner):\n+\n+ base_classifier: Optional[ClassifierMixin] = None\n+\n+ def __post_init__(self) -> None:\n+ super().__post_init__()\n+ if self.base_classifier is None:\n+ self.base_classifier = LogisticRegression(random_state=12345)\n+ else:\n+ if not is_classifier(self.base_classifier):\n+ raise ValueError(\"base_classifier must be a classifier\")\n+ self.base_classifier_list = [\n+ clone(self.base_classifier) for _ in np.arange(self.len_list)\n+ ]\n+\n+ def _create_train_data_for_opl(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: np.ndarray,\n+ **kwargs,\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+\n+ return context, (reward / pscore), action\n+\n+ def fit(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n+ ) -> None:\n+\n+ if pscore is None:\n+ n_actions = np.int(action.max() + 1)\n+ pscore = np.ones_like(action) / n_actions\n+ if position is None or self.len_list == 1:\n+ position = np.zeros_like(action, dtype=int)\n+\n+ for position_ in np.arange(self.len_list):\n+ X, sample_weight, y = self._create_train_data_for_opl(\n+ context=context[position == position_],\n+ action=action[position == position_],\n+ reward=reward[position == position_],\n+ 
pscore=pscore[position == position_],\n+ )\n+ self.base_classifier_list[position_].fit(\n+ X=X, y=y\n+ )\n+\n+ def predict(self, context: np.ndarray) -> np.ndarray:\n+\n+ n_rounds = context.shape[0]\n+ action_dist = np.zeros((n_rounds, self.n_actions, self.len_list))\n+ for position_ in np.arange(self.len_list):\n+ predicted_actions_at_position = self.base_classifier_list[\n+ position_\n+ ].predict(context)\n+ action_dist[\n+ np.arange(n_rounds),\n+ predicted_actions_at_position,\n+ np.ones(n_rounds, dtype=int) * position_,\n+ ] += 1\n+ return action_dist\n+\n+\[email protected](\n+ \"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\n+ offline_experiment_configurations,\n+)\n+def test_offline_ipwlearner_performance(\n+ n_rounds: int,\n+ n_actions: int,\n+ dim_context: int,\n+ base_model_for_evaluation_policy: str,\n+ base_model_for_reg_model: str,\n+) -> None:\n+ def process(i: int):\n+ # synthetic data generator\n+ dataset = SyntheticBanditDataset(\n+ n_actions=n_actions,\n+ dim_context=dim_context,\n+ reward_function=logistic_reward_function,\n+ behavior_policy_function=linear_behavior_policy,\n+ random_state=i,\n+ )\n+ # define evaluation policy using IPWLearner\n+ ipw_policy = IPWLearner(\n+ n_actions=dataset.n_actions,\n+ base_classifier=base_model_dict[base_model_for_evaluation_policy](\n+ **hyperparams[base_model_for_evaluation_policy]\n+ ),\n+ )\n+ # baseline method 1. RandomPolicy\n+ random_policy = RandomPolicy(\n+ n_actions=dataset.n_actions\n+ )\n+ # baseline method 2. UniformSampleWeightLearner\n+ uniform_sample_weight_policy = UniformSampleWeightLearner(\n+ n_actions=dataset.n_actions,\n+ base_classifier=base_model_dict[base_model_for_evaluation_policy](\n+ **hyperparams[base_model_for_evaluation_policy]\n+ ),\n+ )\n+ # sample new training and test sets of synthetic logged bandit feedback\n+ bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ # train the evaluation policy on the training set of the synthetic logged bandit feedback\n+ ipw_policy.fit(\n+ context=bandit_feedback_train[\"context\"],\n+ action=bandit_feedback_train[\"action\"],\n+ reward=bandit_feedback_train[\"reward\"],\n+ pscore=bandit_feedback_train[\"pscore\"],\n+ )\n+ uniform_sample_weight_policy.fit(\n+ context=bandit_feedback_train[\"context\"],\n+ action=bandit_feedback_train[\"action\"],\n+ reward=bandit_feedback_train[\"reward\"],\n+ pscore=bandit_feedback_train[\"pscore\"],\n+ )\n+ # predict the action decisions for the test set of the synthetic logged bandit feedback\n+ ipw_action_dist = ipw_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ random_action_dist = random_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ uniform_sample_weight_action_dist = uniform_sample_weight_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ # get the ground truth policy value for each learner\n+ gt_ipw_learner = dataset.calc_ground_truth_policy_value(\n+ expected_reward=bandit_feedback_test[\"expected_reward\"],\n+ action_dist=ipw_action_dist,\n+ )\n+ gt_random_policy = dataset.calc_ground_truth_policy_value(\n+ expected_reward=bandit_feedback_test[\"expected_reward\"],\n+ action_dist=random_action_dist,\n+ )\n+ gt_uniform_sample_weight_learner = dataset.calc_ground_truth_policy_value(\n+ expected_reward=bandit_feedback_test[\"expected_reward\"],\n+ action_dist=uniform_sample_weight_action_dist,\n+ 
)\n+\n+ return gt_ipw_learner, gt_random_policy, gt_uniform_sample_weight_learner\n+\n+ n_runs = 10\n+ processed = Parallel(\n+ n_jobs=-1,\n+ verbose=0,\n+ )([delayed(process)(i) for i in np.arange(n_runs)])\n+ list_gt_ipw, list_gt_random, list_gt_uniform = [], [], []\n+ for i, ground_truth_policy_values in enumerate(processed):\n+ gt_ipw, gt_random, gt_uniform = ground_truth_policy_values\n+ list_gt_ipw.append(gt_ipw)\n+ list_gt_random.append(gt_random)\n+ list_gt_uniform.append(gt_uniform)\n+\n+ assert np.mean(list_gt_ipw) > np.mean(list_gt_random)\n+ assert np.mean(list_gt_ipw) > np.mean(list_gt_uniform)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add test offline learner performance |
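Both new tests compare policies by their ground-truth policy values, computed from the synthetic dataset's `expected_reward` and each learner's `action_dist`. A numpy sketch of what that value reduces to when `len_list == 1` (random arrays stand in for the real inputs):

```python
import numpy as np

n_rounds, n_actions = 1000, 5
expected_reward = np.random.uniform(size=(n_rounds, n_actions))          # q(x, a)
action_dist = np.random.dirichlet(np.ones(n_actions), size=n_rounds)[:, :, np.newaxis]

# V(pi) = (1 / n) * sum_i sum_a pi(a | x_i) * q(x_i, a)
ground_truth_policy_value = np.average(
    expected_reward, weights=action_dist[:, :, 0], axis=1
).mean()
print(ground_truth_policy_value)
```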
641,006 | 15.03.2021 00:28:46 | -32,400 | 142bb208eeec48988ce9a9ed9983a2bae1c9e385 | add slate bandit dataset | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "from .base import *\nfrom .real import *\nfrom .synthetic import *\n+from .synthetic_slate import *\nfrom .multiclass import *\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "+# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Class for Generating Synthetic SLate Logged Bandit Feedback.\"\"\"\n+from dataclasses import dataclass\n+from typing import Optional, Callable, Tuple, Union, List\n+from itertools import permutations\n+\n+import numpy as np\n+from scipy.stats import truncnorm\n+from sklearn.utils import check_random_state, check_scalar\n+from tqdm import tqdm\n+\n+from .base import BaseBanditDataset\n+from ..types import BanditFeedback\n+from ..utils import sigmoid, softmax\n+\n+\n+@dataclass\n+class SyntheticSlateBanditDataset(BaseBanditDataset):\n+ \"\"\"Class for generating synthetic slate bandit dataset.\n+\n+ Note\n+ -----\n+ By calling the `obtain_batch_bandit_feedback` method several times,\n+ we have different bandit samples with the same setting.\n+ This can be used to estimate confidence intervals of the performances of OPE estimators.\n+\n+ If None is set as `behavior_policy_function`, the synthetic data will be context-free bandit feedback.\n+\n+ Parameters\n+ -----------\n+ n_actions: int\n+ Number of actions.\n+\n+ dim_context: int, default=1\n+ Number of dimensions of context vectors.\n+\n+ reward_type: str, default='binary'\n+ Type of reward variable, which must be either 'binary' or 'continuous'.\n+ When 'binary' is given, rewards are sampled from the Bernoulli distribution.\n+ When 'continuous' is given, rewards are sampled from the truncated Normal distribution with `scale=1`.\n+ The mean parameter of the reward distribution is determined by the `reward_function` specified by the next argument.\n+\n+ reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\n+ Function generating expected reward for each given action-context pair,\n+ i.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n+ If None is set, context **independent** expected reward for each action will be\n+ sampled from the uniform distribution automatically.\n+\n+ behavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\n+ Function generating probability distribution over action space,\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+ If None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\n+\n+ random_state: int, default=12345\n+ Controls the random seed in sampling synthetic bandit dataset.\n+\n+ dataset_name: str, default='synthetic_bandit_dataset'\n+ Name of the dataset.\n+\n+ Examples\n+ ----------\n+\n+ .. 
code-block:: python\n+\n+ >>> import numpy as np\n+ >>> from obp.dataset import (\n+ SyntheticBanditDataset,\n+ linear_reward_function,\n+ linear_behavior_policy\n+ )\n+\n+ # generate synthetic contextual bandit feedback with 10 actions.\n+ >>> dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ dim_context=5,\n+ reward_function=logistic_reward_function,\n+ behavior_policy=linear_behavior_policy,\n+ random_state=12345\n+ )\n+ >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=100000)\n+ >>> bandit_feedback\n+ {\n+ 'n_rounds': 100000,\n+ 'n_actions': 10,\n+ 'context': array([[-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057],\n+ [ 1.39340583, 0.09290788, 0.28174615, 0.76902257, 1.24643474],\n+ [ 1.00718936, -1.29622111, 0.27499163, 0.22891288, 1.35291684],\n+ ...,\n+ [ 1.36946256, 0.58727761, -0.69296769, -0.27519988, -2.10289159],\n+ [-0.27428715, 0.52635353, 1.02572168, -0.18486381, 0.72464834],\n+ [-1.25579833, -1.42455203, -0.26361242, 0.27928604, 1.21015571]]),\n+ 'action_context': array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n+ [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n+ [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n+ [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n+ [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n+ [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n+ [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n+ [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n+ [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]),\n+ 'action': array([7, 4, 0, ..., 7, 9, 6]),\n+ 'position': None,\n+ 'reward': array([0, 1, 1, ..., 0, 1, 0]),\n+ 'expected_reward': array([[0.80210203, 0.73828559, 0.83199558, ..., 0.81190503, 0.70617705,\n+ 0.68985306],\n+ [0.94119582, 0.93473317, 0.91345213, ..., 0.94140688, 0.93152449,\n+ 0.90132868],\n+ [0.87248862, 0.67974991, 0.66965669, ..., 0.79229752, 0.82712978,\n+ 0.74923536],\n+ ...,\n+ [0.64856003, 0.38145901, 0.84476094, ..., 0.40962057, 0.77114661,\n+ 0.65752798],\n+ [0.73208527, 0.82012699, 0.78161352, ..., 0.72361416, 0.8652249 ,\n+ 0.82571751],\n+ [0.40348366, 0.24485417, 0.24037926, ..., 0.49613133, 0.30714854,\n+ 0.5527749 ]]),\n+ 'pscore': array([0.05423855, 0.10339675, 0.09756788, ..., 0.05423855, 0.07250876,\n+ 0.14065505])\n+ }\n+\n+ \"\"\"\n+\n+ n_actions: int\n+ len_list: int\n+ dim_context: int = 1\n+ reward_type: str = \"binary\"\n+ reward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\n+ behavior_policy_function: Optional[\n+ Callable[[np.ndarray, np.ndarray], np.ndarray]\n+ ] = None\n+ random_state: int = 12345\n+ dataset_name: str = \"synthetic_slate_bandit_dataset\"\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize Class.\"\"\"\n+ if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n+ raise ValueError(\n+ f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ )\n+ if (\n+ not isinstance(self.len_list, int)\n+ or self.len_list <= 1\n+ or self.len_list > self.n_actions\n+ ):\n+ raise ValueError(\n+ f\"len_list must be an integer such that 1 < len_list <= n_actions, but {self.len_list} is given\"\n+ )\n+ if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n+ raise ValueError(\n+ f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n+ )\n+ if self.reward_type not in [\n+ \"binary\",\n+ \"continuous\",\n+ ]:\n+ raise ValueError(\n+ f\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\n+ )\n+ if not isinstance(self.random_state, int):\n+ raise ValueError(\"random_state must be an integer\")\n+ self.random_ = check_random_state(self.random_state)\n+ if 
self.behavior_policy_function is None:\n+ self.behavior_policy = np.ones(self.n_actions) / self.n_actions\n+ if self.reward_type == \"continuous\":\n+ self.reward_min = 0\n+ self.reward_max = 1e10\n+ self.reward_std = 1.0\n+ # one-hot encoding representations characterizing each action\n+ self.action_context = np.eye(self.n_actions, dtype=int)\n+\n+ def get_marginal_pscore(\n+ self, perm: List[int], behavior_policy_logit_i_: np.ndarray\n+ ) -> float:\n+ action_set = np.arange(self.n_actions)\n+ pscore_ = 1.0\n+ for action in perm:\n+ score_ = softmax(behavior_policy_logit_i_[:, action_set])[0]\n+ action_index = np.where(action_set == action)[0][0]\n+ pscore_ *= score_[action_index]\n+ action_set = np.delete(action_set, action_set == action)\n+ return pscore_\n+\n+ def sample_action(\n+ self,\n+ behavior_policy_logit_: np.ndarray,\n+ n_rounds: int,\n+ return_pscore_marginal: bool = True,\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n+ action = np.zeros(n_rounds * self.len_list, dtype=int)\n+ pscore_joint_above = np.zeros(n_rounds * self.len_list)\n+ pscore_joint_all = np.zeros(n_rounds * self.len_list)\n+ if return_pscore_marginal:\n+ pscore_marginal = np.zeros(n_rounds * self.len_list)\n+ else:\n+ pscore_marginal = None\n+ for i in tqdm(np.arange(n_rounds), desc=\"[sample_action]\", total=n_rounds):\n+ action_set = np.arange(self.n_actions)\n+ pscore_i = 1.0\n+ for position_ in np.arange(self.len_list):\n+ score_ = softmax(behavior_policy_logit_[i : i + 1, action_set])[0]\n+ action_sampled = self.random_.choice(\n+ action_set, p=score_, replace=False\n+ )\n+ action[i * self.len_list + position_] = action_sampled\n+ sampled_action_index = np.where(action_set == action_sampled)[0][0]\n+ # calculate joint pscore\n+ pscore_joint_above[i * self.len_list + position_] = (\n+ pscore_i * score_[sampled_action_index]\n+ )\n+ pscore_i *= score_[sampled_action_index]\n+ action_set = np.delete(action_set, action_set == action_sampled)\n+ # calculate marginal pscore\n+ if return_pscore_marginal:\n+ pscore_marginal_i_l = 0.0\n+ for perm in permutations(range(self.n_actions), self.len_list):\n+ if sampled_action_index not in perm:\n+ continue\n+ pscore_marginal_i_l += self.get_marginal_pscore(\n+ perm=perm,\n+ behavior_policy_logit_i_=behavior_policy_logit_[i : i + 1],\n+ )\n+ pscore_marginal[i * self.len_list + position_] = pscore_marginal_i_l\n+\n+ # calculate joint pscore all\n+ start_idx = i * self.len_list\n+ end_idx = start_idx + self.len_list\n+ pscore_joint_all[start_idx:end_idx] = pscore_i\n+\n+ return action, pscore_joint_above, pscore_joint_all, pscore_marginal\n+\n+ def obtain_batch_bandit_feedback(\n+ self,\n+ n_rounds: int,\n+ tau: Union[int, float] = 1.0,\n+ return_pscore_marginal: bool = True,\n+ ) -> BanditFeedback:\n+ \"\"\"Obtain batch logged bandit feedback.\n+\n+ Parameters\n+ ----------\n+ n_rounds: int\n+ Number of rounds for synthetic bandit feedback data.\n+\n+ Returns\n+ ---------\n+ bandit_feedback: BanditFeedback\n+ Generated synthetic bandit feedback dataset.\n+\n+ \"\"\"\n+ if not isinstance(n_rounds, int) or n_rounds <= 0:\n+ raise ValueError(\n+ f\"n_rounds must be a positive integer, but {n_rounds} is given\"\n+ )\n+\n+ context = self.random_.normal(size=(n_rounds, self.dim_context))\n+ # sample actions for each round based on the behavior policy\n+ if self.behavior_policy_function is None:\n+ behavior_policy_logit_ = np.tile(self.behavior_policy, (n_rounds, 1))\n+ else:\n+ behavior_policy_logit_ = self.behavior_policy_function(\n+ 
context=context,\n+ action_context=self.action_context,\n+ random_state=self.random_state,\n+ )\n+ (\n+ action,\n+ pscore_joint_above,\n+ pscore_joint_all,\n+ pscore_marginal,\n+ ) = self.sample_action(\n+ behavior_policy_logit_=behavior_policy_logit_,\n+ n_rounds=n_rounds,\n+ return_pscore_marginal=return_pscore_marginal,\n+ )\n+ # action_3d = np.identity(self.n_actions)[\n+ # action.reshape((n_rounds, self.len_list))\n+ # ]\n+\n+ reward = np.zeros(n_rounds * self.len_list, dtype=int)\n+\n+ return dict(\n+ n_rounds=n_rounds,\n+ n_actions=self.n_actions,\n+ impression_id=np.repeat(np.arange(n_rounds), self.len_list),\n+ context=context,\n+ action_context=self.action_context,\n+ action=action,\n+ position=np.tile(range(self.len_list), n_rounds),\n+ reward=reward,\n+ expected_reward=None,\n+ pscore_joint_above=pscore_joint_above,\n+ pscore_joint_all=pscore_joint_all,\n+ pscore_marginal=pscore_marginal,\n+ )\n+\n+\n+def logistic_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Logistic mean reward function for synthetic bandit datasets.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation for each action.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\n+\n+ random_ = check_random_state(random_state)\n+ logits = np.zeros((context.shape[0], action_context.shape[0]))\n+ # each arm has different coefficient vectors\n+ coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n+ action_coef_ = random_.uniform(size=action_context.shape[1])\n+ for d in np.arange(action_context.shape[0]):\n+ logits[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n+\n+ return sigmoid(logits)\n+\n+\n+def linear_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Linear mean reward function for synthetic bandit datasets.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation for each action.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\n+\n+ random_ = 
check_random_state(random_state)\n+ expected_reward = np.zeros((context.shape[0], action_context.shape[0]))\n+ # each arm has different coefficient vectors\n+ coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n+ action_coef_ = random_.uniform(size=action_context.shape[1])\n+ for d in np.arange(action_context.shape[0]):\n+ expected_reward[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n+\n+ return expected_reward\n+\n+\n+def linear_behavior_policy_logit(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+ tau: Union[int, float] = 1.0,\n+) -> np.ndarray:\n+ \"\"\"Linear contextual behavior policy for synthetic bandit datasets.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation for each action.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ tau: int or float, default=1.0\n+ A temperature parameter, controlling the randomness of the action choice.\n+ As :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n+\n+\n+ Returns\n+ ---------\n+ behavior_policy: array-like, shape (n_rounds, n_actions)\n+ logit given context (:math:`x`), i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\n+\n+ check_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\n+\n+ random_ = check_random_state(random_state)\n+ logits = np.zeros((context.shape[0], action_context.shape[0]))\n+ coef_ = random_.uniform(size=context.shape[1])\n+ action_coef_ = random_.uniform(size=action_context.shape[1])\n+ for d in np.arange(action_context.shape[0]):\n+ logits[:, d] = context @ coef_ + action_context[d] @ action_coef_\n+\n+ return logits / tau\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "+from typing import List\n+\n+import pytest\n+import numpy as np\n+import pandas as pd\n+\n+from obp.dataset import SyntheticSlateBanditDataset\n+from obp.dataset.synthetic_slate import (\n+ logistic_reward_function,\n+ linear_reward_function,\n+ linear_behavior_policy_logit,\n+)\n+from obp.types import BanditFeedback\n+\n+\n+# n_actions, len_list, dim_context, reward_type, random_state, description\n+invalid_input_of_init = [\n+ (\"4\", 3, 2, \"binary\", 1, \"n_actions must be an integer larger than 1\"),\n+ (1, 3, 2, \"binary\", 1, \"n_actions must be an integer larger than 1\"),\n+ (5, \"4\", 2, \"binary\", 1, \"len_list must be an integer such that\"),\n+ (5, -1, 2, \"binary\", 1, \"len_list must be an integer such that\"),\n+ (5, 10, 2, \"binary\", 1, \"len_list must be an integer such that\"),\n+ (5, 3, 0, \"binary\", 1, \"dim_context must be a positive integer\"),\n+ (5, 3, \"2\", \"binary\", 1, \"dim_context must be a positive integer\"),\n+ (5, 3, 2, \"aaa\", 1, \"reward_type must be either\"),\n+ (5, 3, 2, \"binary\", \"x\", \"random_state must be an integer\"),\n+ (5, 3, 2, \"binary\", None, \"random_state must be an integer\"),\n+]\n+\n+\[email protected](\n+ \"n_actions, len_list, dim_context, reward_type, random_state, description\",\n+ invalid_input_of_init,\n+)\n+def test_synthetic_slate_init_using_invalid_inputs(\n+ n_actions, len_list, dim_context, reward_type, random_state, description\n+):\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ )\n+\n+\n+def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):\n+ # check pscore columns\n+ pscore_columns: List[str] = []\n+ pscore_candidate_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ for column in pscore_candidate_columns:\n+ if column in bandit_feedback and bandit_feedback[column] is not None:\n+ pscore_columns.append(column)\n+ else:\n+ pscore_columns.append(column)\n+ assert (\n+ len(pscore_columns) > 0\n+ ), f\"bandit feedback must contains at least one of the following pscore columns: {pscore_candidate_columns}\"\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\"impression_id\", \"position\", \"action\"] + pscore_columns:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ # sort dataframe\n+ bandit_feedback_df = (\n+ bandit_feedback_df.sort_values([\"impression_id\", \"position\"])\n+ .reset_index(drop=True)\n+ .copy()\n+ )\n+ # check uniqueness\n+ assert (\n+ bandit_feedback_df.duplicated([\"impression_id\", \"position\"]).sum() == 0\n+ ), \"position must not be duplicated in each impression\"\n+ assert (\n+ bandit_feedback_df.duplicated([\"impression_id\", \"action\"]).sum() == 0\n+ ), \"action must not be duplicated in each impression\"\n+ # check pscores\n+ for column in pscore_columns:\n+ invalid_pscore_flgs = (bandit_feedback_df[column] < 0) | (\n+ bandit_feedback_df[column] > 1\n+ )\n+ assert invalid_pscore_flgs.sum() == 0, \"the range of pscores must be [0, 1]\"\n+ if \"pscore_joint_above\" in pscore_columns and \"pscore_joint_all\" in pscore_columns:\n+ assert (\n+ bandit_feedback_df[\"pscore_joint_above\"]\n+ < bandit_feedback_df[\"pscore_joint_all\"]\n+ ).sum() == 0, \"pscore_joint_above is smaller or equal to pscore_joint_all\"\n+ if \"pscore_marginal\" in pscore_columns and \"pscore_joint_all\" in pscore_columns:\n+ assert (\n+ 
bandit_feedback_df[\"pscore_marginal\"]\n+ < bandit_feedback_df[\"pscore_joint_all\"]\n+ ).sum() == 0, \"pscore_joint_all is smaller or equal to pscore_marginal\"\n+ if \"pscore_marginal\" in pscore_columns and \"pscore_joint_above\" in pscore_columns:\n+ assert (\n+ bandit_feedback_df[\"pscore_marginal\"]\n+ < bandit_feedback_df[\"pscore_joint_above\"]\n+ ).sum() == 0, \"pscore_joint_above is smaller or equal to pscore_marginal\"\n+ if \"pscore_joint_above\" in pscore_columns:\n+ previous_minimum_pscore_joint_above = (\n+ bandit_feedback_df.groupby(\"impression_id\")[\"pscore_joint_above\"]\n+ .expanding()\n+ .min()\n+ .values\n+ )\n+ assert (\n+ previous_minimum_pscore_joint_above\n+ < bandit_feedback_df[\"pscore_joint_above\"]\n+ ).sum() == 0, (\n+ \"pscore_joint_above must be non-decresing sequence in each impression\"\n+ )\n+ if \"pscore_joint_all\" in pscore_columns:\n+ count_pscore_joint_all_in_expression = bandit_feedback_df.groupby(\n+ \"impression_id\"\n+ ).apply(lambda x: x[\"pscore_joint_all\"].unique().shape[0])\n+ assert (\n+ count_pscore_joint_all_in_expression != 1\n+ ).sum() == 0, \"pscore_joint_all must be unique in each impression\"\n+ if \"pscore_joint_all\" in pscore_columns and \"pscore_joint_above\" in pscore_columns:\n+ last_slot_feedback_df = bandit_feedback_df.drop_duplicates(\n+ \"impression_id\", keep=\"last\"\n+ )\n+ assert (\n+ last_slot_feedback_df[\"pscore_joint_all\"]\n+ != last_slot_feedback_df[\"pscore_joint_above\"]\n+ ).sum() == 0, (\n+ \"pscore_joint_all must be the same as pscore_joint_above in the last slot\"\n+ )\n+\n+\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy():\n+ # set parameters\n+ n_actions = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 100\n+ dataset = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ )\n+ # get feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\"impression_id\", \"position\", \"action\"] + pscore_columns:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ # check pscore marginal\n+ pscore_marginal = float(len_list / n_actions)\n+ assert np.allclose(\n+ bandit_feedback_df[\"pscore_marginal\"].unique(), [pscore_marginal]\n+ ), f\"pscore_marginal must be [{pscore_marginal}], but {bandit_feedback_df['pscore_marginal'].unique()}\"\n+ # check pscore joint\n+ pscore_joint_above = []\n+ pscore_above = 1.0\n+ for position_ in range(len_list):\n+ pscore_above = pscore_above * 1.0 / (n_actions - position_)\n+ pscore_joint_above.append(pscore_above)\n+ assert np.allclose(\n+ bandit_feedback_df[\"pscore_joint_above\"], np.tile(pscore_joint_above, n_rounds)\n+ ), f\"pscore_joint_above must be {pscore_joint_above} for all impresessions\"\n+ assert np.allclose(\n+ bandit_feedback_df[\"pscore_joint_all\"].unique(), [pscore_above]\n+ ), f\"pscore_joint_all must be {pscore_above} for all impressions\"\n+\n+\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy():\n+ # set parameters\n+ n_actions = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 
12345\n+ n_rounds = 100\n+ dataset = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ )\n+ # get feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+\n+\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_marginal():\n+ # set parameters\n+ n_actions = 80\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 100\n+ dataset = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ )\n+ # get feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=False\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ assert (\n+ bandit_feedback[\"pscore_marginal\"] is None\n+ ), f\"pscore marginal must be None, but {bandit_feedback['pscore_marginal']}\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add slate bandit dataset |
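Editor's note: a minimal, self-contained sketch of the sampling idea recorded in the diff above — actions are drawn one slot at a time from softmax scores without replacement, and the joint propensity of the slate prefix is the running product of the per-slot probabilities. All names below are illustrative assumptions, not the packaged API.

import numpy as np

def softmax(x: np.ndarray) -> np.ndarray:
    # subtract the max for numerical stability before exponentiating
    e = np.exp(x - x.max())
    return e / e.sum()

def sample_slate(logit: np.ndarray, len_list: int, rng: np.random.RandomState):
    """Sample len_list distinct actions and the cascading joint propensities."""
    action_set = np.arange(logit.shape[0])
    slate, pscore_above = [], []
    pscore = 1.0
    for _ in range(len_list):
        p = softmax(logit[action_set])
        idx = rng.choice(len(action_set), p=p)   # draw one remaining action
        slate.append(int(action_set[idx]))
        pscore *= p[idx]
        pscore_above.append(pscore)              # propensity of the slate up to this slot
        action_set = np.delete(action_set, idx)  # without replacement
    return np.array(slate), np.array(pscore_above)

rng = np.random.RandomState(12345)
slate, pscore_above = sample_slate(rng.normal(size=10), len_list=3, rng=rng)
print(slate)          # three distinct action indices
print(pscore_above)   # non-increasing joint propensities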
641,006 | 15.03.2021 02:28:39 | -32,400 | 3d6ca4e79066e197e66bb1ee4152ad5a5a5a3c7d | add basic reward functions | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -127,6 +127,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nlen_list: int\ndim_context: int = 1\nreward_type: str = \"binary\"\n+ reward_structure: str = \"RIPS\"\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n@@ -157,8 +158,19 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n\"continuous\",\n]:\nraise ValueError(\n- f\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\n+ f\"reward_type must be either 'binary' or 'continuous', but {self.reward_type} is given.'\"\n)\n+ if self.reward_structure not in [\"RIPS\", \"SIPS\", \"IIPS\"]:\n+ raise ValueError(\n+ f\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.'\"\n+ )\n+ # TODO: implement slot_weight\n+ if self.reward_structure == \"IIPS\":\n+ self.slot_weight = np.identity(self.len_list)\n+ elif self.reward_structure == \"SIPS\":\n+ self.slot_weight = None\n+ else:\n+ self.slot_weight = None\nif not isinstance(self.random_state, int):\nraise ValueError(\"random_state must be an integer\")\nself.random_ = check_random_state(self.random_state)\n@@ -275,12 +287,21 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds=n_rounds,\nreturn_pscore_marginal=return_pscore_marginal,\n)\n- # action_3d = np.identity(self.n_actions)[\n- # action.reshape((n_rounds, self.len_list))\n- # ]\n- reward = np.zeros(n_rounds * self.len_list, dtype=int)\n+ if self.reward_function is None:\n+ expected_reward = self.sample_contextfree_expected_reward()\n+ expected_reward_factual, reward = self.sample_reward_given_expected_reward(\n+ expected_reward=expected_reward, action=action, n_rounds=n_rounds\n+ )\n+ else:\n+ expected_reward_factual, reward = self.reward_function(\n+ context=context,\n+ action_context=self.action_context,\n+ action=action,\n+ slot_weight=self.slot_weight,\n+ random_state=self.random_state,\n+ )\nreturn dict(\nn_rounds=n_rounds,\nn_actions=self.n_actions,\n@@ -296,6 +317,95 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_marginal=pscore_marginal,\n)\n+ def sample_contextfree_expected_reward(self) -> np.ndarray:\n+ \"\"\"Sample expected reward for each action and slot from the uniform distribution\"\"\"\n+ return self.random_.uniform(size=(self.n_actions, self.len_list))\n+\n+ def sample_reward_given_expected_reward(\n+ self, expected_reward: np.ndarray, action: np.ndarray, n_rounds: int\n+ ) -> Tuple[np.ndarray, np.ndarray]:\n+ # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ expected_reward_ = np.tile(expected_reward, (n_rounds, 1, 1))\n+ # action_2d: array-like, shape (n_rounds, len_list)\n+ action_2d = action.reshape((n_rounds, self.len_list))\n+ # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n+ expected_reward_factual = [\n+ expected_reward_[np.arange(n_rounds), action_2d[:, position_], position_]\n+ for position_ in range(self.len_list)\n+ ]\n+ if self.reward_type == \"binary\":\n+ reward = np.array(\n+ [\n+ self.random_.binomial(n=1, p=expected_reward_factual[position_])\n+ for position_ in range(self.len_list)\n+ ]\n+ ).T\n+ elif self.reward_type == \"continuous\":\n+ reward = np.zeros((self.n_actions, self.len_list))\n+ for position_ in range(self.len_list):\n+ mean = expected_reward_factual\n+ a = (self.reward_min - mean) / self.reward_std\n+ b = (self.reward_max - mean) / self.reward_std\n+ reward[:, position_] = 
truncnorm.rvs(\n+ a=a,\n+ b=b,\n+ loc=mean,\n+ scale=self.reward_std,\n+ random_state=self.random_state,\n+ )\n+ else:\n+ raise NotImplementedError\n+ # return: two arrays, shape (n_rounds, len_list)\n+ return np.array(expected_reward_factual).T, reward\n+\n+\n+def logistic_weighted_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ action: np.ndarray,\n+ slot_weight: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> Tuple[np.ndarray, np.ndarray]:\n+ \"\"\"\n+ slot_weight: array-like, shape (len_list, len_list)\n+ \"\"\"\n+ random_ = check_random_state(random_state)\n+ # action_2d: array-like, shape (n_rounds, len_list)\n+ action_2d = action.reshape((context.shape[0], slot_weight.shape[0]))\n+ # action_3d: array-like, shape (n_rounds, n_actions, len_list)\n+ action_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n+ if slot_weight.shape[0] < action_3d.shape[2]:\n+ raise ValueError(\n+ \"the size of axis 0 of slot_weight must be the same as the size of axis 1 of action_3d\"\n+ )\n+ # expected_reward: array-like, shape (n_rounds, n_actions)\n+ expected_reward = logistic_reward_function(\n+ context=context, action_context=action_context, random_state=random_state\n+ )\n+ # expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ expected_reward_3d = np.tile(\n+ expected_reward, (slot_weight.shape[0], 1, 1)\n+ ).transpose(1, 2, 0)\n+ # action_weight: array-like, shape (n_actions, len_list)\n+ action_weight = action_3d @ slot_weight.T\n+ # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ weighted_expected_reward = action_weight * expected_reward_3d\n+ # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n+ expected_reward_factual = [\n+ weighted_expected_reward[\n+ np.arange(context.shape[0]), action_2d[:, position_], position_\n+ ]\n+ for position_ in range(slot_weight.shape[0])\n+ ]\n+ reward = np.array(\n+ [\n+ random_.binomial(n=1, p=expected_reward_factual[position_])\n+ for position_ in range(slot_weight.shape[0])\n+ ]\n+ ).T\n+ # return: two arrays, shape (n_rounds, len_list)\n+ return np.array(expected_reward_factual).T, reward\n+\ndef logistic_reward_function(\ncontext: np.ndarray,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add basic reward functions |
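Editor's note: the continuous-reward branch in the diff above samples from a normal distribution truncated to [reward_min, reward_max]; scipy's truncnorm expects those bounds in standard-deviation units relative to loc. A hedged, self-contained sketch with made-up expected rewards:

import numpy as np
from scipy.stats import truncnorm

reward_min, reward_max, reward_std = 0.0, 1e10, 1.0
mean = np.array([0.3, 0.7, 0.5])          # hypothetical expected rewards, not from the data
a = (reward_min - mean) / reward_std      # lower bound in sd units
b = (reward_max - mean) / reward_std      # upper bound in sd units
reward = truncnorm.rvs(a=a, b=b, loc=mean, scale=reward_std, random_state=12345)
print(reward)                             # three non-negative draws centered near the means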
641,006 | 16.03.2021 22:28:46 | -32,400 | 46ea0ce278cab090dc589ed69743e61eeefd63b1 | add reward generator | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -128,6 +128,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndim_context: int = 1\nreward_type: str = \"binary\"\nreward_structure: str = \"RIPS\"\n+ exam_weight: Optional[np.ndarray] = None\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n@@ -164,13 +165,20 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.'\"\n)\n- # TODO: implement slot_weight\n+ if self.exam_weight is None:\n+ self.exam_weight = np.ones(self.len_list)\n+ else:\n+ if not isinstance(self.exam_weight, np.ndarray):\n+ raise ValueError(\n+ f\"exam_weight must be ndarray or None, but {self.exam_weight} is given\"\n+ )\n+ # TODO: implement slot_weight_matrix\nif self.reward_structure == \"IIPS\":\n- self.slot_weight = np.identity(self.len_list)\n+ self.slot_weight_matrix = np.identity(self.len_list)\nelif self.reward_structure == \"SIPS\":\n- self.slot_weight = None\n+ self.slot_weight_matrix = self.get_sips_slot_weight(self.len_list)\nelse:\n- self.slot_weight = None\n+ self.slot_weight_matrix = self.get_rips_slot_weight(self.len_list)\nif not isinstance(self.random_state, int):\nraise ValueError(\"random_state must be an integer\")\nself.random_ = check_random_state(self.random_state)\n@@ -183,6 +191,27 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# one-hot encoding representations characterizing each action\nself.action_context = np.eye(self.n_actions, dtype=int)\n+ @staticmethod\n+ def get_sips_slot_weight(len_list):\n+ slot_weight_matrix = np.ones((len_list, len_list))\n+ for position_ in range(len_list):\n+ slot_weight_matrix[:, position_] = 1 / np.exp(\n+ np.abs(np.arange(len_list) - position_)\n+ )\n+ return slot_weight_matrix\n+\n+ @staticmethod\n+ def get_rips_slot_weight(len_list):\n+ slot_weight_matrix = np.ones((len_list, len_list))\n+ for position_ in range(len_list):\n+ slot_weight_matrix[:, position_] = 1 / np.exp(\n+ np.abs(np.arange(len_list) - position_)\n+ )\n+ for position_2 in range(len_list):\n+ if position_ < position_2:\n+ slot_weight_matrix[position_2, position_] = 0\n+ return slot_weight_matrix\n+\ndef get_marginal_pscore(\nself, perm: List[int], behavior_policy_logit_i_: np.ndarray\n) -> float:\n@@ -243,6 +272,47 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn action, pscore_joint_above, pscore_joint_all, pscore_marginal\n+ def sample_contextfree_expected_reward(self) -> np.ndarray:\n+ \"\"\"Sample expected reward for each action and slot from the uniform distribution\"\"\"\n+ return self.random_.uniform(size=(self.n_actions, self.len_list))\n+\n+ def sample_reward_given_expected_reward(\n+ self, expected_reward: np.ndarray, action: np.ndarray, n_rounds: int\n+ ) -> Tuple[np.ndarray, np.ndarray]:\n+ # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ expected_reward_ = np.tile(expected_reward, (n_rounds, 1, 1))\n+ # action_2d: array-like, shape (n_rounds, len_list)\n+ action_2d = action.reshape((n_rounds, self.len_list))\n+ # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n+ expected_reward_factual = [\n+ expected_reward_[np.arange(n_rounds), action_2d[:, position_], position_]\n+ for position_ in range(self.len_list)\n+ ]\n+ if self.reward_type == \"binary\":\n+ reward = np.array(\n+ [\n+ self.random_.binomial(n=1, p=expected_reward_factual[position_])\n+ 
for position_ in range(self.len_list)\n+ ]\n+ ).T\n+ elif self.reward_type == \"continuous\":\n+ reward = np.zeros((self.n_actions, self.len_list))\n+ for position_ in range(self.len_list):\n+ mean = expected_reward_factual\n+ a = (self.reward_min - mean) / self.reward_std\n+ b = (self.reward_max - mean) / self.reward_std\n+ reward[:, position_] = truncnorm.rvs(\n+ a=a,\n+ b=b,\n+ loc=mean,\n+ scale=self.reward_std,\n+ random_state=self.random_state,\n+ )\n+ else:\n+ raise NotImplementedError\n+ # return: two arrays, shape (n_rounds, len_list)\n+ return np.array(expected_reward_factual).T, reward\n+\ndef obtain_batch_bandit_feedback(\nself,\nn_rounds: int,\n@@ -299,7 +369,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ncontext=context,\naction_context=self.action_context,\naction=action,\n- slot_weight=self.slot_weight,\n+ slot_weight_matrix=self.slot_weight_matrix,\n+ exam_weight=self.exam_weight,\nrandom_state=self.random_state,\n)\nreturn dict(\n@@ -310,101 +381,61 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_context=self.action_context,\naction=action,\nposition=np.tile(range(self.len_list), n_rounds),\n- reward=reward,\n- expected_reward=None,\n+ reward=reward.reshape(action.shape[0]),\n+ expected_reward_factual=expected_reward_factual.reshape(action.shape[0]),\npscore_joint_above=pscore_joint_above,\npscore_joint_all=pscore_joint_all,\npscore_marginal=pscore_marginal,\n)\n- def sample_contextfree_expected_reward(self) -> np.ndarray:\n- \"\"\"Sample expected reward for each action and slot from the uniform distribution\"\"\"\n- return self.random_.uniform(size=(self.n_actions, self.len_list))\n-\n- def sample_reward_given_expected_reward(\n- self, expected_reward: np.ndarray, action: np.ndarray, n_rounds: int\n- ) -> Tuple[np.ndarray, np.ndarray]:\n- # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n- expected_reward_ = np.tile(expected_reward, (n_rounds, 1, 1))\n- # action_2d: array-like, shape (n_rounds, len_list)\n- action_2d = action.reshape((n_rounds, self.len_list))\n- # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n- expected_reward_factual = [\n- expected_reward_[np.arange(n_rounds), action_2d[:, position_], position_]\n- for position_ in range(self.len_list)\n- ]\n- if self.reward_type == \"binary\":\n- reward = np.array(\n- [\n- self.random_.binomial(n=1, p=expected_reward_factual[position_])\n- for position_ in range(self.len_list)\n- ]\n- ).T\n- elif self.reward_type == \"continuous\":\n- reward = np.zeros((self.n_actions, self.len_list))\n- for position_ in range(self.len_list):\n- mean = expected_reward_factual\n- a = (self.reward_min - mean) / self.reward_std\n- b = (self.reward_max - mean) / self.reward_std\n- reward[:, position_] = truncnorm.rvs(\n- a=a,\n- b=b,\n- loc=mean,\n- scale=self.reward_std,\n- random_state=self.random_state,\n- )\n- else:\n- raise NotImplementedError\n- # return: two arrays, shape (n_rounds, len_list)\n- return np.array(expected_reward_factual).T, reward\n-\ndef logistic_weighted_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\n- slot_weight: np.ndarray,\n+ slot_weight_matrix: np.ndarray,\n+ exam_weight: np.ndarray,\nrandom_state: Optional[int] = None,\n+ **kwargs,\n) -> Tuple[np.ndarray, np.ndarray]:\n\"\"\"\n- slot_weight: array-like, shape (len_list, len_list)\n+ slot_weight_matrix: array-like, shape (len_list, len_list)\n\"\"\"\n+ # fix slot_weight_matrix by exam_weight\n+ slot_weight_matrix = slot_weight_matrix * 
exam_weight\nrandom_ = check_random_state(random_state)\n# action_2d: array-like, shape (n_rounds, len_list)\n- action_2d = action.reshape((context.shape[0], slot_weight.shape[0]))\n+ action_2d = action.reshape((context.shape[0], slot_weight_matrix.shape[0]))\n# action_3d: array-like, shape (n_rounds, n_actions, len_list)\naction_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n- if slot_weight.shape[0] < action_3d.shape[2]:\n+ if slot_weight_matrix.shape[0] < action_3d.shape[2]:\nraise ValueError(\n- \"the size of axis 0 of slot_weight must be the same as the size of axis 1 of action_3d\"\n+ \"the size of axis 0 of slot_weight_matrix must be the same as the size of axis 1 of action_3d\"\n)\n# expected_reward: array-like, shape (n_rounds, n_actions)\nexpected_reward = logistic_reward_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\n- # expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ # expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\nexpected_reward_3d = np.tile(\n- expected_reward, (slot_weight.shape[0], 1, 1)\n+ expected_reward, (slot_weight_matrix.shape[0], 1, 1)\n).transpose(1, 2, 0)\n- # action_weight: array-like, shape (n_actions, len_list)\n- action_weight = action_3d @ slot_weight.T\n+ # action_weight: array-like, shape (n_rounds, n_actions, len_list)\n+ action_weight = action_3d @ slot_weight_matrix\n# weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\nweighted_expected_reward = action_weight * expected_reward_3d\n- # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n- expected_reward_factual = [\n- weighted_expected_reward[\n- np.arange(context.shape[0]), action_2d[:, position_], position_\n- ]\n- for position_ in range(slot_weight.shape[0])\n- ]\n+ # expected_reward_factual: list, shape (n_rounds, len_list)\n+ expected_reward_factual = (\n+ weighted_expected_reward.sum(axis=1) / slot_weight_matrix.shape[0]\n+ )\nreward = np.array(\n[\n- random_.binomial(n=1, p=expected_reward_factual[position_])\n- for position_ in range(slot_weight.shape[0])\n+ random_.binomial(n=1, p=expected_reward_factual[:, position_])\n+ for position_ in range(slot_weight_matrix.shape[0])\n]\n- ).T\n+ )\n# return: two arrays, shape (n_rounds, len_list)\n- return np.array(expected_reward_factual).T, reward\n+ return np.array(expected_reward_factual), reward.T\ndef logistic_reward_function(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -6,8 +6,7 @@ import pandas as pd\nfrom obp.dataset import SyntheticSlateBanditDataset\nfrom obp.dataset.synthetic_slate import (\n- logistic_reward_function,\n- linear_reward_function,\n+ logistic_weighted_reward_function,\nlinear_behavior_policy_logit,\n)\nfrom obp.types import BanditFeedback\n@@ -196,6 +195,16 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nbandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ # print reward\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\"impression_id\", \"position\", \"action\", \"reward\"] + pscore_columns:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ print(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_marginal():\n@@ -223,3 +232,188 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nassert (\nbandit_feedback[\"pscore_marginal\"] is None\n), f\"pscore marginal must be None, but {bandit_feedback['pscore_marginal']}\"\n+\n+\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_sips_logistic_reward():\n+ # set parameters\n+ n_actions = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 100\n+ reward_structure = \"SIPS\"\n+ dataset = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ exam_weight=1 / np.exp(np.arange(len_list)),\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ reward_function=logistic_weighted_reward_function,\n+ )\n+ # get feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=False\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\"impression_id\", \"position\", \"action\", \"reward\"] + pscore_columns:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ print(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n+\n+\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_rips_logistic_reward():\n+ # set parameters\n+ n_actions = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 100\n+ reward_structure = \"RIPS\"\n+ dataset = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ exam_weight=1 / np.exp(np.arange(len_list)),\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ reward_function=logistic_weighted_reward_function,\n+ )\n+ # get feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=False\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ pscore_columns = [\n+ 
\"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\"impression_id\", \"position\", \"action\", \"reward\"] + pscore_columns:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ print(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n+\n+\n+def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_rips_logistic_reward():\n+ # set parameters\n+ n_actions = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 123\n+ n_rounds = 10000\n+ dataset_r = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=\"RIPS\",\n+ exam_weight=1 / np.exp(np.arange(len_list)),\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ reward_function=logistic_weighted_reward_function,\n+ )\n+ # get feedback\n+ bandit_feedback_r = dataset_r.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=False\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback_r)\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df_r = pd.DataFrame()\n+ for column in [\n+ \"impression_id\",\n+ \"position\",\n+ \"action\",\n+ \"reward\",\n+ \"expected_reward_factual\",\n+ ] + pscore_columns:\n+ bandit_feedback_df_r[column] = bandit_feedback_r[column]\n+ print(bandit_feedback_df_r.groupby(\"position\")[\"reward\"].describe())\n+ # sips\n+ dataset_s = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=\"SIPS\",\n+ exam_weight=1 / np.exp(np.arange(len_list)),\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ reward_function=logistic_weighted_reward_function,\n+ )\n+ # get feedback\n+ bandit_feedback_s = dataset_s.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=False\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback_s)\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df_s = pd.DataFrame()\n+ for column in [\n+ \"impression_id\",\n+ \"position\",\n+ \"action\",\n+ \"reward\",\n+ \"expected_reward_factual\",\n+ ] + pscore_columns:\n+ bandit_feedback_df_s[column] = bandit_feedback_s[column]\n+ print(bandit_feedback_df_s.groupby(\"position\")[\"reward\"].describe())\n+ # iips\n+ dataset_i = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=\"IIPS\",\n+ exam_weight=1 / np.exp(np.arange(len_list)),\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ reward_function=logistic_weighted_reward_function,\n+ )\n+ # get feedback\n+ bandit_feedback_i = dataset_i.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=False\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback_i)\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df_i = pd.DataFrame()\n+ for column in [\n+ \"impression_id\",\n+ \"position\",\n+ \"action\",\n+ 
\"reward\",\n+ \"expected_reward_factual\",\n+ ] + pscore_columns:\n+ bandit_feedback_df_i[column] = bandit_feedback_i[column]\n+ print(bandit_feedback_df_i.groupby(\"position\")[\"reward\"].describe())\n+ # import pdb\n+\n+ # pdb.set_trace()\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add reward generator |
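Editor's note: the reward-structure weights introduced above can be pictured as small position-by-position matrices — identity for independent slots (IIPS), a symmetric exponential decay for SIPS, and the same decay with later slots masked out for RIPS. The helper below is an illustrative reconstruction under that reading, not the packaged implementation.

import numpy as np

def decay_weight_matrix(len_list: int, cascade: bool) -> np.ndarray:
    W = np.zeros((len_list, len_list))
    for pos in range(len_list):
        # weight decays exponentially with the distance between positions
        W[:, pos] = 1.0 / np.exp(np.abs(np.arange(len_list) - pos))
        if cascade:
            W[pos + 1:, pos] = 0.0  # drop contributions from later slots (cascade assumption)
    return W

len_list = 3
print(np.identity(len_list))                 # IIPS-style: independent slots
print(decay_weight_matrix(len_list, False))  # SIPS-style: symmetric decay
print(decay_weight_matrix(len_list, True))   # RIPS-style: cascade decay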
641,003 | 20.03.2021 11:02:31 | -32,400 | ef12cf06cb89b37dd64fc5b6bf385dbbc15fdb8e | change reward_type to enum | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/dataset/reward_type.py",
"diff": "+import enum\n+\n+\n+class RewardType(enum.Enum):\n+ \"\"\"Reward type.\n+\n+ Attributes\n+ ----------\n+ BINARY:\n+ The reward type is binary.\n+ CONTINUOUS:\n+ The reward type is continuous.\n+ \"\"\"\n+\n+ BINARY = \"binary\"\n+ CONTINUOUS = \"continuous\"\n+\n+ def __repr__(self) -> str:\n+\n+ return str(self)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -12,6 +12,7 @@ from sklearn.utils import check_random_state\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\nfrom ..utils import sigmoid, softmax\n+from .reward_type import RewardType\n@dataclass\n@@ -123,7 +124,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nn_actions: int\ndim_context: int = 1\n- reward_type: str = \"binary\"\n+ reward_type: str = RewardType.BINARY.value\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n@@ -141,12 +142,12 @@ class SyntheticBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"dim_context must be a positive integer, but {self.dim_context} is given\"\n)\n- if self.reward_type not in [\n- \"binary\",\n- \"continuous\",\n+ if RewardType(self.reward_type) not in [\n+ RewardType.BINARY,\n+ RewardType.CONTINUOUS,\n]:\nraise ValueError(\n- f\"reward_type must be either 'binary' or 'continuous, but {self.reward_type} is given.'\"\n+ f\"reward_type must be either '{RewardType.BINARY.value}' or '{RewardType.CONTINUOUS.value}', but {self.reward_type} is given.'\"\n)\nif self.random_state is None:\nraise ValueError(\"random_state must be given\")\n@@ -155,7 +156,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nself.expected_reward = self.sample_contextfree_expected_reward()\nif self.behavior_policy_function is None:\nself.behavior_policy = np.ones(self.n_actions) / self.n_actions\n- if self.reward_type == \"continuous\":\n+ if RewardType(self.reward_type) == RewardType.CONTINUOUS:\nself.reward_min = 0\nself.reward_max = 1e10\nself.reward_std = 1.0\n@@ -192,9 +193,9 @@ class SyntheticBanditDataset(BaseBanditDataset):\n) -> np.ndarray:\n\"\"\"Sample reward given expected rewards\"\"\"\nexpected_reward_factual = expected_reward[np.arange(action.shape[0]), action]\n- if self.reward_type == \"binary\":\n+ if RewardType(self.reward_type) == RewardType.BINARY:\nreward = self.random_.binomial(n=1, p=expected_reward_factual)\n- elif self.reward_type == \"continuous\":\n+ elif RewardType(self.reward_type) == RewardType.CONTINUOUS:\nmean = expected_reward_factual\na = (self.reward_min - mean) / self.reward_std\nb = (self.reward_max - mean) / self.reward_std\n@@ -290,7 +291,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nexpected_reward_ = self.calc_expected_reward(context)\nreward = self.sample_reward_given_expected_reward(expected_reward_, action)\n- if self.reward_type == \"continuous\":\n+ if RewardType(self.reward_type) == RewardType.CONTINUOUS:\n# correct expected_reward_, as we use truncated normal distribution here\nmean = expected_reward_\na = (self.reward_min - mean) / self.reward_std\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | change reward_type to enum |
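Editor's note: a tiny usage sketch (assumed, not taken from the diff) of the Enum validation pattern the RewardType change introduces — a raw string is mapped onto a member, and an unknown value raises ValueError.

import enum

class RewardType(enum.Enum):
    BINARY = "binary"
    CONTINUOUS = "continuous"

print(RewardType("binary") is RewardType.BINARY)  # True: value lookup returns the member
try:
    RewardType("gaussian")                        # not a defined value
except ValueError as err:
    print(err)                                    # 'gaussian' is not a valid RewardType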
641,003 | 20.03.2021 11:24:42 | -32,400 | 83013355a91ee884f48b83d817c8988c4af9b180 | change policy_type to enum | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/base.py",
"new_path": "obp/policy/base.py",
"diff": "@@ -9,6 +9,8 @@ from typing import Optional\nimport numpy as np\nfrom sklearn.utils import check_random_state\n+from .policy_type import PolicyType\n+\n@dataclass\nclass BaseContextFreePolicy(metaclass=ABCMeta):\n@@ -66,9 +68,9 @@ class BaseContextFreePolicy(metaclass=ABCMeta):\nself.reward_counts = np.zeros(self.n_actions)\n@property\n- def policy_type(self) -> str:\n+ def policy_type(self) -> PolicyType:\n\"\"\"Type of the bandit policy.\"\"\"\n- return \"contextfree\"\n+ return PolicyType.CONTEXT_FREE\ndef initialize(self) -> None:\n\"\"\"Initialize Parameters.\"\"\"\n@@ -172,9 +174,9 @@ class BaseContextualPolicy(metaclass=ABCMeta):\nself.context_lists = [[] for _ in np.arange(self.n_actions)]\n@property\n- def policy_type(self) -> str:\n+ def policy_type(self) -> PolicyType:\n\"\"\"Type of the bandit policy.\"\"\"\n- return \"contextual\"\n+ return PolicyType.CONTEXTUAL\ndef initialize(self) -> None:\n\"\"\"Initialize policy parameters.\"\"\"\n@@ -231,9 +233,9 @@ class BaseOfflinePolicyLearner(metaclass=ABCMeta):\n)\n@property\n- def policy_type(self) -> str:\n+ def policy_type(self) -> PolicyType:\n\"\"\"Type of the bandit policy.\"\"\"\n- return \"offline\"\n+ return PolicyType.OFFLINE\n@abstractmethod\ndef fit(\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/policy/policy_type.py",
"diff": "+import enum\n+\n+\n+class PolicyType(enum.Enum):\n+ \"\"\"Policy type.\n+\n+ Attributes\n+ ----------\n+ CONTEXT_FREE:\n+ The policy type is contextfree.\n+ CONTEXTUAL:\n+ The policy type is contextual.\n+ OFFLINE:\n+ The policy type is offline.\n+ \"\"\"\n+\n+ CONTEXT_FREE = 0\n+ CONTEXTUAL = 1\n+ OFFLINE = 2\n+\n+ def __repr__(self) -> str:\n+\n+ return str(self)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "\"\"\"Bandit Simulator.\"\"\"\nfrom copy import deepcopy\n-from typing import Optional, Callable\n+from typing import Callable\nfrom tqdm import tqdm\n@@ -11,6 +11,7 @@ import numpy as np\nfrom ..utils import check_bandit_feedback_inputs, convert_to_action_dist\nfrom ..types import BanditFeedback, BanditPolicy\n+from ..policy.policy_type import PolicyType\ndef run_bandit_simulation(\n@@ -61,17 +62,17 @@ def run_bandit_simulation(\n):\n# select a list of actions\n- if policy_.policy_type == \"contextfree\":\n+ if policy_.policy_type == PolicyType.CONTEXT_FREE:\nselected_actions = policy_.select_action()\n- elif policy_.policy_type == \"contextual\":\n+ elif policy_.policy_type == PolicyType.CONTEXTUAL:\nselected_actions = policy_.select_action(context_.reshape(1, dim_context))\naction_match_ = action_ == selected_actions[position_]\n# update parameters of a bandit policy\n# only when selected actions&positions are equal to logged actions&positions\nif action_match_:\n- if policy_.policy_type == \"contextfree\":\n+ if policy_.policy_type == PolicyType.CONTEXT_FREE:\npolicy_.update_params(action=action_, reward=reward_)\n- elif policy_.policy_type == \"contextual\":\n+ elif policy_.policy_type == PolicyType.CONTEXTUAL:\npolicy_.update_params(\naction=action_,\nreward=reward_,\n@@ -144,9 +145,9 @@ def calc_ground_truth_policy_value(\n):\n# select a list of actions\n- if policy_.policy_type == \"contextfree\":\n+ if policy_.policy_type == PolicyType.CONTEXT_FREE:\nselected_actions = policy_.select_action()\n- elif policy_.policy_type == \"contextual\":\n+ elif policy_.policy_type == PolicyType.CONTEXTUAL:\nselected_actions = policy_.select_action(\ncontext_.reshape(1, dim_context)\n)\n@@ -158,9 +159,9 @@ def calc_ground_truth_policy_value(\ncumulative_reward += expected_reward_[action]\n# update parameters of a bandit policy\n- if policy_.policy_type == \"contextfree\":\n+ if policy_.policy_type == PolicyType.CONTEXT_FREE:\npolicy_.update_params(action=action, reward=reward)\n- elif policy_.policy_type == \"contextual\":\n+ elif policy_.policy_type == PolicyType.CONTEXTUAL:\npolicy_.update_params(\naction=action,\nreward=reward,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_contextfree.py",
"new_path": "tests/policy/test_contextfree.py",
"diff": "@@ -4,6 +4,7 @@ import numpy as np\nfrom obp.policy.contextfree import EpsilonGreedy\nfrom obp.policy.contextfree import Random\nfrom obp.policy.contextfree import BernoulliTS\n+from obp.policy.policy_type import PolicyType\ndef test_contextfree_base_exception():\n@@ -44,6 +45,9 @@ def test_egreedy_normal_epsilon():\npolicy2 = EpsilonGreedy(n_actions=3, epsilon=0.3)\nassert 0 <= policy2.epsilon <= 1\n+ # policy type\n+ assert EpsilonGreedy(n_actions=2).policy_type == PolicyType.CONTEXT_FREE\n+\ndef test_egreedy_abnormal_epsilon():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_linear.py",
"new_path": "tests/policy/test_linear.py",
"diff": "@@ -4,6 +4,7 @@ import numpy as np\nfrom obp.policy.linear import LinEpsilonGreedy\nfrom obp.policy.linear import LinUCB\nfrom obp.policy.linear import LinTS\n+from obp.policy.policy_type import PolicyType\ndef test_linear_base_exception():\n@@ -86,6 +87,10 @@ def test_lin_epsilon_abnormal_epsilon():\ndef test_lin_epsilon_select_action_exploitation():\ntrial_num = 50\npolicy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=0.0)\n+\n+ # policy type\n+ assert policy.policy_type == PolicyType.CONTEXTUAL\n+\ncontext = np.array([1.0, 1.0]).reshape(1, -1)\npolicy.update_params(action=0, reward=1.0, context=context)\npolicy.update_params(action=0, reward=1.0, context=context)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline.py",
"new_path": "tests/policy/test_offline.py",
"diff": "@@ -4,6 +4,7 @@ from sklearn.linear_model import LogisticRegression\nfrom obp.policy.offline import IPWLearner\n+from obp.policy.policy_type import PolicyType\ndef test_base_opl_init():\n@@ -22,7 +23,7 @@ def test_base_opl_init():\nIPWLearner(n_actions=2, len_list=\"3\")\n# policy_type\n- assert IPWLearner(n_actions=2).policy_type == \"offline\"\n+ assert IPWLearner(n_actions=2).policy_type == PolicyType.OFFLINE\n# invalid relationship between n_actions and len_list\nwith pytest.raises(ValueError):\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | change policy_type to enum |
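Editor's note: the simulator changes above replace string comparison with enum comparison when branching on the policy type; a minimal sketch of that dispatch pattern (names assumed, not the library's API) looks like this.

import enum

class PolicyType(enum.Enum):
    CONTEXT_FREE = 0
    CONTEXTUAL = 1
    OFFLINE = 2

def describe_call(policy_type: PolicyType) -> str:
    # branch on the enum member rather than a magic string
    if policy_type == PolicyType.CONTEXT_FREE:
        return "select_action()"
    elif policy_type == PolicyType.CONTEXTUAL:
        return "select_action(context)"
    raise ValueError(f"unsupported policy type: {policy_type}")

print(describe_call(PolicyType.CONTEXT_FREE))  # context-free branch
print(describe_call(PolicyType.CONTEXTUAL))    # contextual branch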
641,003 | 20.03.2021 12:19:55 | -32,400 | 9fdddedcf9911e43b4f28526876dc21eeb06814e | fix flake8 error F541 | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/multiclass.py",
"new_path": "obp/dataset/multiclass.py",
"diff": "@@ -150,7 +150,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nif not is_classifier(self.base_classifier_b):\n- raise ValueError(f\"base_classifier_b must be a classifier\")\n+ raise ValueError(\"base_classifier_b must be a classifier\")\nif not isinstance(self.alpha_b, float) or not (0.0 <= self.alpha_b < 1.0):\nraise ValueError(\nf\"alpha_b must be a float in the [0,1) interval, but {self.alpha_b} is given\"\n@@ -289,7 +289,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\nelse:\nassert is_classifier(\nbase_classifier_e\n- ), f\"base_classifier_e must be a classifier\"\n+ ), \"base_classifier_e must be a classifier\"\nbase_clf_e = clone(base_classifier_e)\nbase_clf_e.fit(X=self.X_tr, y=self.y_tr)\npreds = base_clf_e.predict(self.X_ev).astype(int)\n@@ -319,7 +319,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\n\"\"\"\nif not isinstance(action_dist, np.ndarray) or action_dist.ndim != 3:\n- raise ValueError(f\"action_dist must be a 3-D np.ndarray\")\n+ raise ValueError(\"action_dist must be a 3-D np.ndarray\")\nif action_dist.shape[0] != self.n_rounds_ev:\nraise ValueError(\n\"the size of axis 0 of action_dist must be the same as the number of samples in the evaluation set\"\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/linear.py",
"new_path": "obp/policy/linear.py",
"diff": "@@ -282,7 +282,7 @@ class LinTS(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- self.policy_name = f\"linear_ts\"\n+ self.policy_name = \"linear_ts\"\nsuper().__post_init__()\nself.A_inv = np.concatenate(\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -323,7 +323,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\n\"\"\"\nassert (\nself.len_list == 1\n- ), f\"predict_proba method can be used only when len_list = 1\"\n+ ), \"predict_proba method can be used only when len_list = 1\"\nassert (\nisinstance(context, np.ndarray) and context.ndim == 2\n), \"context must be 2-dimensional ndarray\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix flake8 error F541 |
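Editor's note: flake8's F541 flags f-string literals that contain no placeholders; the fix above simply drops the f prefix. A one-line illustration (assumed, not from the diff):

bad = f"base_classifier_b must be a classifier"   # F541: f-string without any placeholders
good = "base_classifier_b must be a classifier"   # plain literal, identical at runtime
assert bad == good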
641,003 | 20.03.2021 12:44:35 | -32,400 | 8e42430714ffc3b450d3dd03acab507e666b6be6 | fix flake8 error for dataset/__init__.py | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "-from .base import *\n-from .real import *\n-from .synthetic import *\n-from .multiclass import *\n+from obp.dataset.base import BaseBanditDataset\n+from obp.dataset.base import BaseRealBanditDataset\n+from obp.dataset.real import OpenBanditDataset\n+from obp.dataset.synthetic import SyntheticBanditDataset\n+from obp.dataset.multiclass import MultiClassToBanditReduction\n+\n+\n+__all__ = [\n+ \"BaseBanditDataset\",\n+ \"BaseRealBanditDataset\",\n+ \"OpenBanditDataset\",\n+ \"SyntheticBanditDataset\",\n+ \"MultiClassToBanditReduction\",\n+]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -10,6 +10,20 @@ from obp.ope.estimators import DoublyRobustWithShrinkage\nfrom obp.ope.meta import OffPolicyEvaluation\nfrom obp.ope.regression_model import RegressionModel\n+__all__ = [\n+ \"BaseOffPolicyEstimator\",\n+ \"ReplayMethod\",\n+ \"InverseProbabilityWeighting\",\n+ \"SelfNormalizedInverseProbabilityWeighting\",\n+ \"DirectMethod\",\n+ \"DoublyRobust\",\n+ \"SelfNormalizedDoublyRobust\",\n+ \"SwitchDoublyRobust\",\n+ \"DoublyRobustWithShrinkage\",\n+ \"OffPolicyEvaluation\",\n+ \"RegressionModel\",\n+]\n+\n__all_estimators__ = [\n\"ReplayMethod\",\n\"InverseProbabilityWeighting\",\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix flake8 error for dataset/__init__.py |
641,003 | 20.03.2021 12:53:19 | -32,400 | 81b79b318c11c24b67f387a074ba3d2f2601ddd8 | fix flake8 error for simulation/__init__.py | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -2,6 +2,9 @@ from obp.dataset.base import BaseBanditDataset\nfrom obp.dataset.base import BaseRealBanditDataset\nfrom obp.dataset.real import OpenBanditDataset\nfrom obp.dataset.synthetic import SyntheticBanditDataset\n+from obp.dataset.synthetic import logistic_reward_function\n+from obp.dataset.synthetic import linear_reward_function\n+from obp.dataset.synthetic import linear_behavior_policy\nfrom obp.dataset.multiclass import MultiClassToBanditReduction\n@@ -10,5 +13,8 @@ __all__ = [\n\"BaseRealBanditDataset\",\n\"OpenBanditDataset\",\n\"SyntheticBanditDataset\",\n+ \"logistic_reward_function\",\n+ \"linear_reward_function\",\n+ \"linear_behavior_policy\",\n\"MultiClassToBanditReduction\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/simulator/__init__.py",
"new_path": "obp/simulator/__init__.py",
"diff": "-from .simulator import *\n+from obp.simulator.simulator import run_bandit_simulation\n+from obp.simulator.simulator import calc_ground_truth_policy_value\n+\n+\n+__all__ = [\n+ \"run_bandit_simulation\",\n+ \"calc_ground_truth_policy_value\",\n+]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix flake8 error for simulation/__init__.py |
641,003 | 20.03.2021 13:38:17 | -32,400 | 436dc900b9a1af49f390f8499159b730459212c3 | replace expand_dims with np.newaxis | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/multiclass.py",
"new_path": "obp/dataset/multiclass.py",
"diff": "@@ -299,7 +299,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\npi_e[np.arange(self.n_rounds_ev), preds] = (\nalpha_e + (1.0 - alpha_e) / self.n_actions\n)\n- return np.expand_dims(pi_e, 2)\n+ return pi_e[:, :, np.newaxis]\ndef calc_ground_truth_policy_value(self, action_dist: np.ndarray) -> float:\n\"\"\"Calculate the ground-truth policy value of a given action distribution.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/linear.py",
"new_path": "obp/policy/linear.py",
"diff": "@@ -89,7 +89,7 @@ class LinEpsilonGreedy(BaseContextualPolicy):\nif self.random_.rand() > self.epsilon:\nself.theta_hat = np.concatenate(\n[\n- self.A_inv[i] @ np.expand_dims(self.b[:, i], axis=1)\n+ self.A_inv[i] @ self.b[:, i][:, np.newaxis]\nfor i in np.arange(self.n_actions)\n],\naxis=1,\n@@ -209,7 +209,7 @@ class LinUCB(BaseContextualPolicy):\n)\nself.theta_hat = np.concatenate(\n[\n- self.A_inv[i] @ np.expand_dims(self.b[:, i], axis=1)\n+ self.A_inv[i] @ self.b[:, i][:, np.newaxis]\nfor i in np.arange(self.n_actions)\n],\naxis=1,\n@@ -311,17 +311,16 @@ class LinTS(BaseContextualPolicy):\n\"\"\"\ntheta_hat = np.concatenate(\n[\n- self.A_inv[i] @ np.expand_dims(self.b[:, i], axis=1)\n+ self.A_inv[i] @ self.b[:, i][:, np.newaxis]\nfor i in np.arange(self.n_actions)\n],\naxis=1,\n)\ntheta_sampled = np.concatenate(\n[\n- np.expand_dims(\n- self.random_.multivariate_normal(theta_hat[:, i], self.A_inv[i]),\n- axis=1,\n- )\n+ self.random_.multivariate_normal(theta_hat[:, i], self.A_inv[i])[\n+ :, np.newaxis\n+ ]\nfor i in np.arange(self.n_actions)\n],\naxis=1,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -344,7 +344,7 @@ def sigmoid(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:\ndef softmax(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:\n\"\"\"Calculate softmax function.\"\"\"\n- b = np.expand_dims(np.max(x, axis=1), 1)\n+ b = np.max(x, axis=1)[:, np.newaxis]\nnumerator = np.exp(x - b)\n- denominator = np.expand_dims(np.sum(numerator, axis=1), 1)\n+ denominator = np.sum(numerator, axis=1)[:, np.newaxis]\nreturn numerator / denominator\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_all_estimators.py",
"new_path": "tests/ope/test_all_estimators.py",
"diff": "@@ -244,9 +244,7 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\n\"\"\"\nbandit_feedback = synthetic_bandit_feedback\naction_dist = random_action_dist\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\n# test all estimators\nall_estimators = ope.__all_estimators__\nestimators = [\n@@ -285,9 +283,7 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\n\"\"\"\nbandit_feedback = synthetic_bandit_feedback\naction_dist = random_action_dist\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\n# test all estimators\nall_estimators = ope.__all_estimators__\nestimators = [\n@@ -331,9 +327,7 @@ def test_performance_of_ope_estimators_using_random_evaluation_policy(\n\"\"\"\nTest the performance of ope estimators using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# compute ground truth policy value using expected reward\nq_pi_e = np.average(expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1)\n@@ -373,9 +367,7 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\n\"\"\"\nTest the response format of ope estimators using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# test all estimators\nall_estimators = ope.__all_estimators__\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dm_estimators.py",
"new_path": "tests/ope/test_dm_estimators.py",
"diff": "@@ -62,9 +62,7 @@ def test_dm_using_random_evaluation_policy(\n\"\"\"\nTest the performance of the direct method using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# compute ground truth policy value using expected reward\nq_pi_e = np.average(expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -341,9 +341,7 @@ def test_dr_using_random_evaluation_policy(\n\"\"\"\nTest the format of dr variants using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# prepare input dict\ninput_dict = {\n@@ -380,9 +378,7 @@ def test_boundedness_of_sndr_using_random_evaluation_policy(\n\"\"\"\nTest the boundedness of sndr estimators using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# prepare input dict\ninput_dict = {\n@@ -406,9 +402,7 @@ def test_dr_shrinkage_using_random_evaluation_policy(\n\"\"\"\nTest the dr shrinkage estimators using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# prepare input dict\ninput_dict = {\n@@ -436,9 +430,7 @@ def test_switch_dr_using_random_evaluation_policy(\n\"\"\"\nTest the switch_dr using synthetic bandit data and random evaluation policy\n\"\"\"\n- expected_reward = np.expand_dims(\n- synthetic_bandit_feedback[\"expected_reward\"], axis=-1\n- )\n+ expected_reward = synthetic_bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# prepare input dict\ninput_dict = {\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_regression_models.py",
"new_path": "tests/ope/test_regression_models.py",
"diff": "@@ -816,7 +816,7 @@ def test_performance_of_binary_outcome_models(\nwhen the regression model is estimated by a logistic regression\n\"\"\"\nbandit_feedback = fixed_synthetic_bandit_feedback.copy()\n- expected_reward = np.expand_dims(bandit_feedback[\"expected_reward\"], axis=-1)\n+ expected_reward = bandit_feedback[\"expected_reward\"][:, :, np.newaxis]\naction_dist = random_action_dist\n# compute ground truth policy value using expected reward\nq_pi_e = np.average(expected_reward[:, :, 0], weights=action_dist[:, :, 0], axis=1)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | replace expand_dims with np.newaxis |
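The diffs above consistently swap `np.expand_dims(x, axis=...)` for `np.newaxis` indexing; the two forms produce identical arrays, which a quick check (not taken from the repo) confirms:

```python
import numpy as np

x = np.arange(12).reshape(3, 4)

# Trailing axis: shape (3, 4) -> (3, 4, 1), as in expected_reward[:, :, np.newaxis]
a = np.expand_dims(x, axis=-1)
b = x[:, :, np.newaxis]
assert a.shape == b.shape == (3, 4, 1) and np.array_equal(a, b)

# Column vector from a 1-D slice: shape (3,) -> (3, 1), as in b[:, i][:, np.newaxis]
col = x[:, 0]
c = np.expand_dims(col, axis=1)
d = col[:, np.newaxis]
assert c.shape == d.shape == (3, 1) and np.array_equal(c, d)
```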
641,003 | 20.03.2021 13:51:07 | -32,400 | 7e998f7e1e276ef3fcdcf31f544a9911074f8678 | add flake8 to github actions | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/lints.yml",
"new_path": ".github/workflows/lints.yml",
"diff": "@@ -23,3 +23,9 @@ jobs:\nuses: psf/black@stable\nwith:\nargs: \". --check --diff\"\n+\n+ - name: flake8\n+ run: |\n+ python -m pip install --upgrade pip\n+ pip install flake8\n+ flake8 .\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add flake8 to github actions |
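The workflow step above pip-installs flake8 and runs `flake8 .` over the repository. One possible way to run the same check locally from Python (assuming flake8 is installed in the active environment; this wrapper is an illustration, not part of the repo):

```python
import subprocess
import sys

# Run flake8 over the current directory with the same interpreter, mirroring the CI step.
result = subprocess.run([sys.executable, "-m", "flake8", "."])
raise SystemExit(result.returncode)  # non-zero exit when lint errors are found
```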
641,003 | 20.03.2021 14:51:00 | -32,400 | 8a633d71e410d9986d9118258c2ea8a4e765de80 | adopt auto in Enum | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/policy_type.py",
"new_path": "obp/policy/policy_type.py",
"diff": "@@ -14,9 +14,9 @@ class PolicyType(enum.Enum):\nThe policy type is offline.\n\"\"\"\n- CONTEXT_FREE = 0\n- CONTEXTUAL = 1\n- OFFLINE = 2\n+ CONTEXT_FREE = enum.auto()\n+ CONTEXTUAL = enum.auto()\n+ OFFLINE = enum.auto()\ndef __repr__(self) -> str:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | adopt auto in Enum |
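The diff replaces hand-numbered enum members with `enum.auto()`, which assigns increasing values automatically (starting at 1 for `Enum`). A standalone sketch mirroring the member names from the diff:

```python
import enum


class PolicyType(enum.Enum):
    CONTEXT_FREE = enum.auto()
    CONTEXTUAL = enum.auto()
    OFFLINE = enum.auto()


# auto() numbers members 1, 2, 3 in definition order.
assert [m.value for m in PolicyType] == [1, 2, 3]
assert PolicyType.CONTEXTUAL.name == "CONTEXTUAL"
```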
641,006 | 21.03.2021 17:44:01 | -32,400 | 4d7326c9684aaf0380814a7ae0a0b8036124a78a | fix sample_reward_given_expected_reward; add reward structure where rewards depend on those of other slots | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -128,6 +128,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndim_context: int = 1\nreward_type: str = \"binary\"\nreward_structure: str = \"RIPS\"\n+ reward_transition_rate: Optional[np.ndarray] = np.array([0.5, 0.2])\nexam_weight: Optional[np.ndarray] = None\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nbehavior_policy_function: Optional[\n@@ -161,7 +162,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_type must be either 'binary' or 'continuous', but {self.reward_type} is given.'\"\n)\n- if self.reward_structure not in [\"RIPS\", \"SIPS\", \"IIPS\"]:\n+ if self.reward_structure not in [\"RIPS\", \"SIPS\", \"IIPS\", \"Cascade\", \"Greedy\"]:\nraise ValueError(\nf\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.'\"\n)\n@@ -173,12 +174,15 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nf\"exam_weight must be ndarray or None, but {self.exam_weight} is given\"\n)\n# TODO: implement slot_weight_matrix\n- if self.reward_structure == \"IIPS\":\n- self.slot_weight_matrix = np.identity(self.len_list)\n- elif self.reward_structure == \"SIPS\":\n+ if self.reward_structure == \"SIPS\":\nself.slot_weight_matrix = self.get_sips_slot_weight(self.len_list)\n- else:\n+ elif self.reward_structure == \"RIPS\":\nself.slot_weight_matrix = self.get_rips_slot_weight(self.len_list)\n+ elif self.reward_structure in [\"Cascade\", \"Greedy\"]:\n+ self.exam_weight = np.ones(self.len_list)\n+ self.slot_weight_matrix = np.identity(self.len_list)\n+ else:\n+ self.slot_weight_matrix = np.identity(self.len_list)\nif not isinstance(self.random_state, int):\nraise ValueError(\"random_state must be an integer\")\nself.random_ = check_random_state(self.random_state)\n@@ -277,28 +281,58 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn self.random_.uniform(size=(self.n_actions, self.len_list))\ndef sample_reward_given_expected_reward(\n- self, expected_reward: np.ndarray, action: np.ndarray, n_rounds: int\n+ self, expected_reward_factual: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n- # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n- expected_reward_ = np.tile(expected_reward, (n_rounds, 1, 1))\n- # action_2d: array-like, shape (n_rounds, len_list)\n- action_2d = action.reshape((n_rounds, self.len_list))\n- # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n- expected_reward_factual = [\n- expected_reward_[np.arange(n_rounds), action_2d[:, position_], position_]\n- for position_ in range(self.len_list)\n- ]\n+ # expected_reward_factual_actions: list, shape (n_rounds, len_list)\n+ if self.reward_structure in [\"Cascade\", \"Greedy\"]:\n+ reward = np.zeros(expected_reward_factual.shape)\n+ for i in tqdm(\n+ np.arange(reward.shape[0]),\n+ desc=\"[sample_reward]\",\n+ total=reward.shape[0],\n+ ):\n+ previous_reward = 0.0\n+ action_list = (\n+ np.arange(3)\n+ if self.reward_structure == \"Cascade\"\n+ else np.argsort(expected_reward_factual[i])[::-1]\n+ )\nif self.reward_type == \"binary\":\n+ for position_ in action_list:\n+ reward[i, position_] = self.random_.binomial(\n+ n=1,\n+ p=expected_reward_factual[i, position_]\n+ * self.reward_transition_rate[int(previous_reward)],\n+ )\n+ previous_reward = reward[i, position_]\n+ else:\n+ for position_ in action_list:\n+ mean = (\n+ expected_reward_factual[i, position_]\n+ * self.reward_transition_rate[int(previous_reward >= 0)]\n+ )\n+ a = (self.reward_min - 
mean) / self.reward_std\n+ b = (self.reward_max - mean) / self.reward_std\n+ reward[i, position_] = truncnorm.rvs(\n+ a=a,\n+ b=b,\n+ loc=mean,\n+ scale=self.reward_std,\n+ random_state=self.random_state,\n+ )\n+ previous_reward = reward[i, position_]\n+\n+ elif self.reward_type == \"binary\":\nreward = np.array(\n[\n- self.random_.binomial(n=1, p=expected_reward_factual[position_])\n+ self.random_.binomial(n=1, p=expected_reward_factual[:, position_])\nfor position_ in range(self.len_list)\n]\n).T\nelif self.reward_type == \"continuous\":\n- reward = np.zeros((self.n_actions, self.len_list))\n+ reward = np.zeros(expected_reward_factual.shape)\nfor position_ in range(self.len_list):\n- mean = expected_reward_factual\n+ mean = expected_reward_factual[:, position_]\na = (self.reward_min - mean) / self.reward_std\nb = (self.reward_max - mean) / self.reward_std\nreward[:, position_] = truncnorm.rvs(\n@@ -311,7 +345,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nelse:\nraise NotImplementedError\n# return: two arrays, shape (n_rounds, len_list)\n- return np.array(expected_reward_factual).T, reward\n+ return reward\ndef obtain_batch_bandit_feedback(\nself,\n@@ -360,12 +394,21 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nif self.reward_function is None:\nexpected_reward = self.sample_contextfree_expected_reward()\n- expected_reward_factual, reward = self.sample_reward_given_expected_reward(\n- expected_reward=expected_reward, action=action, n_rounds=n_rounds\n- )\n+ expected_reward_tile = np.tile(expected_reward, (n_rounds, 1, 1))\n+ # action_2d: array-like, shape (n_rounds, len_list)\n+ action_2d = action.reshape((n_rounds, self.len_list))\n+ # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n+ expected_reward_factual = np.array(\n+ [\n+ expected_reward_tile[\n+ np.arange(n_rounds), action_2d[:, position_], position_\n+ ]\n+ for position_ in range(self.len_list)\n+ ]\n+ ).T\nelse:\n- expected_reward_factual, reward = self.reward_function(\n+ expected_reward_factual = self.reward_function(\ncontext=context,\naction_context=self.action_context,\naction=action,\n@@ -373,6 +416,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nexam_weight=self.exam_weight,\nrandom_state=self.random_state,\n)\n+ reward = self.sample_reward_given_expected_reward(\n+ expected_reward_factual=expected_reward_factual\n+ )\n+\nreturn dict(\nn_rounds=n_rounds,\nn_actions=self.n_actions,\n@@ -389,6 +436,48 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n+def linear_weighted_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ action: np.ndarray,\n+ slot_weight_matrix: np.ndarray,\n+ exam_weight: np.ndarray,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+) -> np.ndarray:\n+ \"\"\"\n+ slot_weight_matrix: array-like, shape (len_list, len_list)\n+ \"\"\"\n+ # fix slot_weight_matrix by exam_weight\n+ slot_weight_matrix = slot_weight_matrix * exam_weight\n+ # action_2d: array-like, shape (n_rounds, len_list)\n+ action_2d = action.reshape((context.shape[0], slot_weight_matrix.shape[0]))\n+ # action_3d: array-like, shape (n_rounds, n_actions, len_list)\n+ action_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n+ if slot_weight_matrix.shape[0] < action_3d.shape[2]:\n+ raise ValueError(\n+ \"the size of axis 0 of slot_weight_matrix must be the same as the size of axis 1 of action_3d\"\n+ )\n+ # expected_reward: array-like, shape (n_rounds, n_actions)\n+ expected_reward = linear_reward_function(\n+ context=context, 
action_context=action_context, random_state=random_state\n+ )\n+ # expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\n+ expected_reward_3d = np.tile(\n+ expected_reward, (slot_weight_matrix.shape[0], 1, 1)\n+ ).transpose(1, 2, 0)\n+ # action_weight: array-like, shape (n_rounds, n_actions, len_list)\n+ action_weight = action_3d @ slot_weight_matrix\n+ # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ weighted_expected_reward = action_weight * expected_reward_3d\n+ # expected_reward_factual: list, shape (n_rounds, len_list)\n+ expected_reward_factual = (\n+ weighted_expected_reward.sum(axis=1) / slot_weight_matrix.shape[0]\n+ )\n+ # return: array, shape (n_rounds, len_list)\n+ return np.array(expected_reward_factual)\n+\n+\ndef logistic_weighted_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n@@ -397,13 +486,12 @@ def logistic_weighted_reward_function(\nexam_weight: np.ndarray,\nrandom_state: Optional[int] = None,\n**kwargs,\n-) -> Tuple[np.ndarray, np.ndarray]:\n+) -> np.ndarray:\n\"\"\"\nslot_weight_matrix: array-like, shape (len_list, len_list)\n\"\"\"\n# fix slot_weight_matrix by exam_weight\nslot_weight_matrix = slot_weight_matrix * exam_weight\n- random_ = check_random_state(random_state)\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((context.shape[0], slot_weight_matrix.shape[0]))\n# action_3d: array-like, shape (n_rounds, n_actions, len_list)\n@@ -428,14 +516,8 @@ def logistic_weighted_reward_function(\nexpected_reward_factual = (\nweighted_expected_reward.sum(axis=1) / slot_weight_matrix.shape[0]\n)\n- reward = np.array(\n- [\n- random_.binomial(n=1, p=expected_reward_factual[:, position_])\n- for position_ in range(slot_weight_matrix.shape[0])\n- ]\n- )\n- # return: two arrays, shape (n_rounds, len_list)\n- return np.array(expected_reward_factual), reward.T\n+ # return: array, shape (n_rounds, len_list)\n+ return np.array(expected_reward_factual)\ndef logistic_reward_function(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -7,6 +7,7 @@ import pandas as pd\nfrom obp.dataset import SyntheticSlateBanditDataset\nfrom obp.dataset.synthetic_slate import (\nlogistic_weighted_reward_function,\n+ linear_weighted_reward_function,\nlinear_behavior_policy_logit,\n)\nfrom obp.types import BanditFeedback\n@@ -417,3 +418,215 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\n# import pdb\n# pdb.set_trace()\n+\n+\n+# n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_marginal, description\n+valid_input_ = [\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"SIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_weighted_reward_function,\n+ False,\n+ \"SIPS\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"IIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_weighted_reward_function,\n+ False,\n+ \"IIPS\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"RIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_weighted_reward_function,\n+ False,\n+ \"RIPS\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"SIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ linear_weighted_reward_function,\n+ False,\n+ \"SIPS continuous\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"IIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ linear_weighted_reward_function,\n+ False,\n+ \"IIPS continuous\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"RIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ linear_weighted_reward_function,\n+ False,\n+ \"RIPS continuous\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"RIPS\",\n+ 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\n+ False,\n+ \"Random policy and reward function (continous reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"Cascade\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_weighted_reward_function,\n+ False,\n+ \"Cascade (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"Cascade\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ linear_weighted_reward_function,\n+ False,\n+ \"Cascade (continous reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"Greedy\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_weighted_reward_function,\n+ False,\n+ \"Greedy (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"Greedy\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ linear_weighted_reward_function,\n+ False,\n+ \"Greedy (continous reward)\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_marginal, description\",\n+ valid_input_,\n+)\n+def test_synthetic_slate_using_valid_inputs(\n+ n_actions,\n+ len_list,\n+ dim_context,\n+ reward_type,\n+ random_state,\n+ n_rounds,\n+ reward_structure,\n+ exam_weight,\n+ behavior_policy_function,\n+ reward_function,\n+ return_pscore_marginal,\n+ description,\n+):\n+ dataset = SyntheticSlateBanditDataset(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ 
dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ exam_weight=exam_weight,\n+ random_state=random_state,\n+ behavior_policy_function=behavior_policy_function,\n+ reward_function=reward_function,\n+ )\n+ # get feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_marginal=return_pscore_marginal\n+ )\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ pscore_columns = [\n+ \"pscore_joint_above\",\n+ \"pscore_joint_all\",\n+ \"pscore_marginal\",\n+ ]\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\n+ \"impression_id\",\n+ \"position\",\n+ \"action\",\n+ \"reward\",\n+ \"expected_reward_factual\",\n+ ] + pscore_columns:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ print(f\"-------{description}--------\")\n+ print(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix sample_reward_given_expected_reward; add reward structure where rewards depend on those of other slots |
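The continuous-reward branches in the diffs above sample from a normal distribution truncated to `[reward_min, reward_max]` via `scipy.stats.truncnorm`, converting the bounds into standard-deviation units relative to each mean. A small sketch of that parameterization (the numeric values here are illustrative, not obp's defaults):

```python
import numpy as np
from scipy.stats import truncnorm

reward_min, reward_max, reward_std = 0.0, 10.0, 1.0
mean = np.array([0.2, 1.5, 3.0])  # e.g. expected reward of the factual action per round

# truncnorm takes the truncation bounds in units of standard deviations from loc.
a = (reward_min - mean) / reward_std
b = (reward_max - mean) / reward_std
reward = truncnorm.rvs(a=a, b=b, loc=mean, scale=reward_std, random_state=12345)

assert reward.shape == mean.shape
assert np.all((reward_min <= reward) & (reward <= reward_max))
```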
641,006 | 21.03.2021 18:50:27 | -32,400 | 95a54403d03fe914526177bb81ac14e9c7ad92dc | clean reward functions; add several comments | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -13,7 +13,8 @@ from tqdm import tqdm\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n-from ..utils import sigmoid, softmax\n+from ..utils import softmax\n+from .synthetic import logistic_reward_function, linear_reward_function\n@dataclass\n@@ -24,15 +25,19 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n-----\nBy calling the `obtain_batch_bandit_feedback` method several times,\nwe have different bandit samples with the same setting.\n- This can be used to estimate confidence intervals of the performances of OPE estimators.\n+ This can be used to estimate confidence intervals of the performances of Slate OPE estimators.\nIf None is set as `behavior_policy_function`, the synthetic data will be context-free bandit feedback.\nParameters\n-----------\n- n_actions: int\n+ n_actions: int (>= len_list)\nNumber of actions.\n+ len_list: int (> 1)\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\ndim_context: int, default=1\nNumber of dimensions of context vectors.\n@@ -42,24 +47,35 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nWhen 'continuous' is given, rewards are sampled from the truncated Normal distribution with `scale=1`.\nThe mean parameter of the reward distribution is determined by the `reward_function` specified by the next argument.\n- reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\n- Function generating expected reward for each given action-context pair,\n- i.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n- If None is set, context **independent** expected reward for each action will be\n+ TODO: ---comment---\n+ reward_structure: str, default='cascade'\n+ TBD\n+\n+ reward_transition_rate: np.ndarray, default=np.array([0.5, 0.2])\n+ TBD\n+\n+ exam_weight: np.ndarray, default=None\n+ TBD\n+ ---TODO---:\n+\n+ reward_function: Callable[[np.ndarray] * 5, np.ndarray]], default=None\n+ Function generating slot-level expected reward for each given factual action-context pair,\n+ i.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A}^{\\\\mathcal{L}} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{L}}`.\n+ If None is set, context **independent** expected reward for each factual action will be\nsampled from the uniform distribution automatically.\nbehavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\n- Function generating probability distribution over action space,\n- i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+ Function generating logit value of each action in action space,\n+ i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\nIf None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\nrandom_state: int, default=12345\n- Controls the random seed in sampling synthetic bandit dataset.\n+ Controls the random seed in sampling synthetic slate bandit dataset.\n- dataset_name: str, default='synthetic_bandit_dataset'\n+ dataset_name: str, default='synthetic_slate_bandit_dataset'\nName of the dataset.\n- Examples\n+ TODO: Examples\n----------\n.. 
code-block:: python\n@@ -130,7 +146,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreward_structure: str = \"RIPS\"\nreward_transition_rate: Optional[np.ndarray] = np.array([0.5, 0.2])\nexam_weight: Optional[np.ndarray] = None\n- reward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\n+ reward_function: Optional[\n+ Callable[\n+ [np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n+ ]\n+ ] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n] = None\n@@ -173,12 +193,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"exam_weight must be ndarray or None, but {self.exam_weight} is given\"\n)\n- # TODO: implement slot_weight_matrix\n+ # TODO: fix reward structure names\nif self.reward_structure == \"SIPS\":\nself.slot_weight_matrix = self.get_sips_slot_weight(self.len_list)\nelif self.reward_structure == \"RIPS\":\nself.slot_weight_matrix = self.get_rips_slot_weight(self.len_list)\nelif self.reward_structure in [\"Cascade\", \"Greedy\"]:\n+ # exam weight is reset when reward structure is cascade of greedy\nself.exam_weight = np.ones(self.len_list)\nself.slot_weight_matrix = np.identity(self.len_list)\nelse:\n@@ -192,6 +213,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself.reward_min = 0\nself.reward_max = 1e10\nself.reward_std = 1.0\n+ # set the base reward function\n+ if self.reward_function is not None:\n+ if self.reward_type == \"binary\":\n+ self.base_reward_function = logistic_reward_function\n+ else:\n+ self.base_reward_function = linear_reward_function\n# one-hot encoding representations characterizing each action\nself.action_context = np.eye(self.n_actions, dtype=int)\n@@ -234,6 +261,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds: int,\nreturn_pscore_marginal: bool = True,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n+ \"\"\"TODO: comment\"\"\"\naction = np.zeros(n_rounds * self.len_list, dtype=int)\npscore_joint_above = np.zeros(n_rounds * self.len_list)\npscore_joint_all = np.zeros(n_rounds * self.len_list)\n@@ -283,7 +311,18 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndef sample_reward_given_expected_reward(\nself, expected_reward_factual: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n- # expected_reward_factual_actions: list, shape (n_rounds, len_list)\n+ \"\"\"Sample reward for each action and slot based on expected_reward_factual\n+\n+ Parameters\n+ ------------\n+ expected_reward_factual: array-like, shape (n_rounds, len_list)\n+ expected reward of factual actions\n+\n+ Returns\n+ ----------\n+ sampled reward: array-like, shape (n_actions, len_list)\n+\n+ \"\"\"\nif self.reward_structure in [\"Cascade\", \"Greedy\"]:\nreward = np.zeros(expected_reward_factual.shape)\nfor i in tqdm(\n@@ -292,6 +331,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ntotal=reward.shape[0],\n):\nprevious_reward = 0.0\n+ # actions are in order of browsing assumption\naction_list = (\nnp.arange(3)\nif self.reward_structure == \"Cascade\"\n@@ -344,7 +384,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nelse:\nraise NotImplementedError\n- # return: two arrays, shape (n_rounds, len_list)\n+ # return: array-like, shape (n_rounds, len_list)\nreturn reward\ndef obtain_batch_bandit_feedback(\n@@ -360,10 +400,18 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds: int\nNumber of rounds for synthetic bandit feedback data.\n+ tau: int or float, default=1.0\n+ A 
temperature parameter, controlling the randomness of the action choice.\n+ As :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n+\n+ return_pscore_marginal: bool, default=True\n+ A boolean parameter whether `pscore_marginal` is returned or not.\n+ When `n_actions` and `len_list` are large, this parameter should be set to False because of the computational time\n+\nReturns\n---------\nbandit_feedback: BanditFeedback\n- Generated synthetic bandit feedback dataset.\n+ Generated synthetic slate bandit feedback dataset.\n\"\"\"\nif not isinstance(n_rounds, int) or n_rounds <= 0:\n@@ -381,6 +429,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_context=self.action_context,\nrandom_state=self.random_state,\n)\n+ # check the shape of behavior_policy_logit_\n+ if not (\n+ isinstance(behavior_policy_logit_, np.ndarray)\n+ and behavior_policy_logit_.shape == (n_rounds, self.n_actions)\n+ ):\n+ raise ValueError(\"behavior_policy_logit_ is Invalid\")\n+ # sample action and pscores\n(\naction,\npscore_joint_above,\n@@ -391,13 +446,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds=n_rounds,\nreturn_pscore_marginal=return_pscore_marginal,\n)\n-\n+ # sample expected reward factual\nif self.reward_function is None:\nexpected_reward = self.sample_contextfree_expected_reward()\nexpected_reward_tile = np.tile(expected_reward, (n_rounds, 1, 1))\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((n_rounds, self.len_list))\n- # expected_reward_factual_actions: list, shape (len_list, n_rounds)\n+ # expected_reward_factual: array-like, shape (n_rounds, len_list)\nexpected_reward_factual = np.array(\n[\nexpected_reward_tile[\n@@ -407,15 +462,22 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n]\n).T\nelse:\n-\nexpected_reward_factual = self.reward_function(\ncontext=context,\naction_context=self.action_context,\naction=action,\nslot_weight_matrix=self.slot_weight_matrix,\nexam_weight=self.exam_weight,\n+ base_function=self.base_reward_function,\nrandom_state=self.random_state,\n)\n+ # check the shape of expected_reward_factual\n+ if not (\n+ isinstance(expected_reward_factual, np.ndarray)\n+ and expected_reward_factual.shape == (n_rounds, self.len_list)\n+ ):\n+ raise ValueError(\"expected_reward_factual is Invalid\")\n+ # sample reward\nreward = self.sample_reward_given_expected_reward(\nexpected_reward_factual=expected_reward_factual\n)\n@@ -436,58 +498,17 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n-def linear_weighted_reward_function(\n- context: np.ndarray,\n- action_context: np.ndarray,\n- action: np.ndarray,\n- slot_weight_matrix: np.ndarray,\n- exam_weight: np.ndarray,\n- random_state: Optional[int] = None,\n- **kwargs,\n-) -> np.ndarray:\n- \"\"\"\n- slot_weight_matrix: array-like, shape (len_list, len_list)\n- \"\"\"\n- # fix slot_weight_matrix by exam_weight\n- slot_weight_matrix = slot_weight_matrix * exam_weight\n- # action_2d: array-like, shape (n_rounds, len_list)\n- action_2d = action.reshape((context.shape[0], slot_weight_matrix.shape[0]))\n- # action_3d: array-like, shape (n_rounds, n_actions, len_list)\n- action_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n- if slot_weight_matrix.shape[0] < action_3d.shape[2]:\n- raise ValueError(\n- \"the size of axis 0 of slot_weight_matrix must be the same as the size of axis 1 of action_3d\"\n- )\n- # expected_reward: array-like, shape (n_rounds, n_actions)\n- expected_reward = 
linear_reward_function(\n- context=context, action_context=action_context, random_state=random_state\n- )\n- # expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\n- expected_reward_3d = np.tile(\n- expected_reward, (slot_weight_matrix.shape[0], 1, 1)\n- ).transpose(1, 2, 0)\n- # action_weight: array-like, shape (n_rounds, n_actions, len_list)\n- action_weight = action_3d @ slot_weight_matrix\n- # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n- weighted_expected_reward = action_weight * expected_reward_3d\n- # expected_reward_factual: list, shape (n_rounds, len_list)\n- expected_reward_factual = (\n- weighted_expected_reward.sum(axis=1) / slot_weight_matrix.shape[0]\n- )\n- # return: array, shape (n_rounds, len_list)\n- return np.array(expected_reward_factual)\n-\n-\n-def logistic_weighted_reward_function(\n+def weighted_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\nslot_weight_matrix: np.ndarray,\nexam_weight: np.ndarray,\n+ base_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"\n+ \"\"\"TODO: comment\nslot_weight_matrix: array-like, shape (len_list, len_list)\n\"\"\"\n# fix slot_weight_matrix by exam_weight\n@@ -501,7 +522,7 @@ def logistic_weighted_reward_function(\n\"the size of axis 0 of slot_weight_matrix must be the same as the size of axis 1 of action_3d\"\n)\n# expected_reward: array-like, shape (n_rounds, n_actions)\n- expected_reward = logistic_reward_function(\n+ expected_reward = base_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\n# expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\n@@ -520,95 +541,13 @@ def logistic_weighted_reward_function(\nreturn np.array(expected_reward_factual)\n-def logistic_reward_function(\n- context: np.ndarray,\n- action_context: np.ndarray,\n- random_state: Optional[int] = None,\n-) -> np.ndarray:\n- \"\"\"Logistic mean reward function for synthetic bandit datasets.\n-\n- Parameters\n- -----------\n- context: array-like, shape (n_rounds, dim_context)\n- Context vectors characterizing each round (such as user information).\n-\n- action_context: array-like, shape (n_actions, dim_action_context)\n- Vector representation for each action.\n-\n- random_state: int, default=None\n- Controls the random seed in sampling dataset.\n-\n- Returns\n- ---------\n- expected_reward: array-like, shape (n_rounds, n_actions)\n- Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n-\n- \"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2-dimensional ndarray\")\n-\n- if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n- raise ValueError(\"action_context must be 2-dimensional ndarray\")\n-\n- random_ = check_random_state(random_state)\n- logits = np.zeros((context.shape[0], action_context.shape[0]))\n- # each arm has different coefficient vectors\n- coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n- action_coef_ = random_.uniform(size=action_context.shape[1])\n- for d in np.arange(action_context.shape[0]):\n- logits[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n-\n- return sigmoid(logits)\n-\n-\n-def linear_reward_function(\n- context: np.ndarray,\n- action_context: np.ndarray,\n- random_state: Optional[int] = None,\n-) -> np.ndarray:\n- \"\"\"Linear mean 
reward function for synthetic bandit datasets.\n-\n- Parameters\n- -----------\n- context: array-like, shape (n_rounds, dim_context)\n- Context vectors characterizing each round (such as user information).\n-\n- action_context: array-like, shape (n_actions, dim_action_context)\n- Vector representation for each action.\n-\n- random_state: int, default=None\n- Controls the random seed in sampling dataset.\n-\n- Returns\n- ---------\n- expected_reward: array-like, shape (n_rounds, n_actions)\n- Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n-\n- \"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2-dimensional ndarray\")\n-\n- if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n- raise ValueError(\"action_context must be 2-dimensional ndarray\")\n-\n- random_ = check_random_state(random_state)\n- expected_reward = np.zeros((context.shape[0], action_context.shape[0]))\n- # each arm has different coefficient vectors\n- coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n- action_coef_ = random_.uniform(size=action_context.shape[1])\n- for d in np.arange(action_context.shape[0]):\n- expected_reward[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n-\n- return expected_reward\n-\n-\ndef linear_behavior_policy_logit(\ncontext: np.ndarray,\naction_context: np.ndarray,\nrandom_state: Optional[int] = None,\ntau: Union[int, float] = 1.0,\n) -> np.ndarray:\n- \"\"\"Linear contextual behavior policy for synthetic bandit datasets.\n+ \"\"\"Linear contextual behavior policy for synthetic slate bandit datasets.\nParameters\n-----------\n@@ -625,11 +564,10 @@ def linear_behavior_policy_logit(\nA temperature parameter, controlling the randomness of the action choice.\nAs :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n-\nReturns\n---------\n- behavior_policy: array-like, shape (n_rounds, n_actions)\n- logit given context (:math:`x`), i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+ logit value: array-like, shape (n_rounds, n_actions)\n+ logit given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n\"\"\"\nif not isinstance(context, np.ndarray) or context.ndim != 2:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -6,8 +6,7 @@ import pandas as pd\nfrom obp.dataset import SyntheticSlateBanditDataset\nfrom obp.dataset.synthetic_slate import (\n- logistic_weighted_reward_function,\n- linear_weighted_reward_function,\n+ weighted_reward_function,\nlinear_behavior_policy_logit,\n)\nfrom obp.types import BanditFeedback\n@@ -253,7 +252,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nexam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=logistic_weighted_reward_function,\n+ reward_function=weighted_reward_function,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n@@ -290,7 +289,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nexam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=logistic_weighted_reward_function,\n+ reward_function=weighted_reward_function,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n@@ -326,7 +325,7 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\nexam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=logistic_weighted_reward_function,\n+ reward_function=weighted_reward_function,\n)\n# get feedback\nbandit_feedback_r = dataset_r.obtain_batch_bandit_feedback(\n@@ -359,7 +358,7 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\nexam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=logistic_weighted_reward_function,\n+ reward_function=weighted_reward_function,\n)\n# get feedback\nbandit_feedback_s = dataset_s.obtain_batch_bandit_feedback(\n@@ -392,7 +391,7 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\nexam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=logistic_weighted_reward_function,\n+ reward_function=weighted_reward_function,\n)\n# get feedback\nbandit_feedback_i = dataset_i.obtain_batch_bandit_feedback(\n@@ -432,7 +431,7 @@ valid_input_ = [\n\"SIPS\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- logistic_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"SIPS\",\n),\n@@ -446,7 +445,7 @@ valid_input_ = [\n\"IIPS\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- logistic_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"IIPS\",\n),\n@@ -460,7 +459,7 @@ valid_input_ = [\n\"RIPS\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- logistic_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"RIPS\",\n),\n@@ -474,7 +473,7 @@ valid_input_ = [\n\"SIPS\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- linear_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"SIPS continuous\",\n),\n@@ -488,7 +487,7 @@ valid_input_ = [\n\"IIPS\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- linear_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"IIPS continuous\",\n),\n@@ -502,7 +501,7 @@ valid_input_ = [\n\"RIPS\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- linear_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"RIPS 
continuous\",\n),\n@@ -530,7 +529,7 @@ valid_input_ = [\n\"Cascade\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- logistic_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"Cascade (binary reward)\",\n),\n@@ -544,7 +543,7 @@ valid_input_ = [\n\"Cascade\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- linear_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"Cascade (continous reward)\",\n),\n@@ -558,7 +557,7 @@ valid_input_ = [\n\"Greedy\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- logistic_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"Greedy (binary reward)\",\n),\n@@ -572,7 +571,7 @@ valid_input_ = [\n\"Greedy\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- linear_weighted_reward_function,\n+ weighted_reward_function,\nFalse,\n\"Greedy (continous reward)\",\n),\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | clean reward functions; add several comments |
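The docstrings added above describe `linear_behavior_policy_logit` as returning per-action logits that are later turned into an action distribution using a temperature parameter `tau` (larger `tau` means choices closer to uniform). A standalone sketch of that logits-to-probabilities step, with a locally defined softmax rather than obp's utility:

```python
import numpy as np


def softmax(x: np.ndarray) -> np.ndarray:
    z = x - x.max(axis=1, keepdims=True)  # subtract row-wise max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)


rng = np.random.default_rng(12345)
logits = rng.normal(size=(4, 5))  # (n_rounds, n_actions); values are illustrative
for tau in (0.1, 1.0, 10.0):
    probs = softmax(logits / tau)  # larger tau -> flatter (more uniform) distribution
    assert np.allclose(probs.sum(axis=1), 1.0)
```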
641,006 | 03.04.2021 01:06:33 | -32,400 | b88b0b85042f4fbfe440068b0ec46aa1d9371fd2 | fix reward structures; add test of slate dataset | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -13,8 +13,7 @@ from tqdm import tqdm\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n-from ..utils import softmax\n-from .synthetic import logistic_reward_function, linear_reward_function\n+from ..utils import softmax, sigmoid\n@dataclass\n@@ -143,10 +142,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nlen_list: int\ndim_context: int = 1\nreward_type: str = \"binary\"\n- reward_structure: str = \"RIPS\"\n- reward_transition_rate: Optional[np.ndarray] = np.array([0.5, 0.2])\n+ reward_structure: str = \"cascade_additive\"\n+ click_model: Optional[str] = None\nexam_weight: Optional[np.ndarray] = None\n- reward_function: Optional[\n+ base_reward_function: Optional[\nCallable[\n[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n]\n@@ -175,6 +174,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"dim_context must be a positive integer, but {self.dim_context} is given\"\n)\n+ if not isinstance(self.random_state, int):\n+ raise ValueError(\"random_state must be an integer\")\n+ self.random_ = check_random_state(self.random_state)\nif self.reward_type not in [\n\"binary\",\n\"continuous\",\n@@ -182,7 +184,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_type must be either 'binary' or 'continuous', but {self.reward_type} is given.'\"\n)\n- if self.reward_structure not in [\"RIPS\", \"SIPS\", \"IIPS\", \"Cascade\", \"Greedy\"]:\n+ if self.reward_structure not in [\n+ \"cascade_additive\",\n+ \"cascade_exponential\",\n+ \"independent\",\n+ \"standard_additive\",\n+ \"standard_exponential\",\n+ ]:\nraise ValueError(\nf\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.'\"\n)\n@@ -193,54 +201,62 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"exam_weight must be ndarray or None, but {self.exam_weight} is given\"\n)\n- # TODO: fix reward structure names\n- if self.reward_structure == \"SIPS\":\n- self.slot_weight_matrix = self.get_sips_slot_weight(self.len_list)\n- elif self.reward_structure == \"RIPS\":\n- self.slot_weight_matrix = self.get_rips_slot_weight(self.len_list)\n- elif self.reward_structure in [\"Cascade\", \"Greedy\"]:\n- # exam weight is reset when reward structure is cascade of greedy\n+ # TODO: remove this line when we implement click models\nself.exam_weight = np.ones(self.len_list)\n- self.slot_weight_matrix = np.identity(self.len_list)\n+ # TODO: fix reward structure names\n+ if self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n+ self.action_effect_matrix = generate_synmetric_matrix(\n+ self.random_state, self.n_actions\n+ )\n+ self.slot_weight_matrix = None\n+ if self.base_reward_function is not None:\n+ self.reward_function = action_effect_additive_reward_function\n+ self.is_cascade = self.reward_structure == \"cascade_additive\"\n+ else:\n+ self.action_effect_matrix = None\n+ self.is_cascade = None\n+ if self.base_reward_function is not None:\n+ self.reward_function = slot_weighted_reward_function\n+ if self.reward_structure == \"standard_exponential\":\n+ self.slot_weight_matrix = self.get_standard_exponential_slot_weight(\n+ self.len_list\n+ )\n+ elif self.reward_structure == \"cascade_exponential\":\n+ self.slot_weight_matrix = self.get_cascade_exponential_slot_weight(\n+ self.len_list\n+ )\nelse:\nself.slot_weight_matrix = np.identity(self.len_list)\n- if not isinstance(self.random_state, int):\n- raise ValueError(\"random_state must 
be an integer\")\n- self.random_ = check_random_state(self.random_state)\nif self.behavior_policy_function is None:\nself.behavior_policy = np.ones(self.n_actions) / self.n_actions\nif self.reward_type == \"continuous\":\nself.reward_min = 0\nself.reward_max = 1e10\nself.reward_std = 1.0\n- # set the base reward function\n- if self.reward_function is not None:\n- if self.reward_type == \"binary\":\n- self.base_reward_function = logistic_reward_function\n- else:\n- self.base_reward_function = linear_reward_function\n# one-hot encoding representations characterizing each action\nself.action_context = np.eye(self.n_actions, dtype=int)\n@staticmethod\n- def get_sips_slot_weight(len_list):\n+ def get_standard_exponential_slot_weight(len_list):\nslot_weight_matrix = np.ones((len_list, len_list))\nfor position_ in range(len_list):\n- slot_weight_matrix[:, position_] = 1 / np.exp(\n+ slot_weight_matrix[:, position_] = -1 / np.exp(\nnp.abs(np.arange(len_list) - position_)\n)\n+ slot_weight_matrix[position_, position_] = 1\nreturn slot_weight_matrix\n@staticmethod\n- def get_rips_slot_weight(len_list):\n+ def get_cascade_exponential_slot_weight(len_list):\nslot_weight_matrix = np.ones((len_list, len_list))\nfor position_ in range(len_list):\n- slot_weight_matrix[:, position_] = 1 / np.exp(\n+ slot_weight_matrix[:, position_] = -1 / np.exp(\nnp.abs(np.arange(len_list) - position_)\n)\nfor position_2 in range(len_list):\nif position_ < position_2:\nslot_weight_matrix[position_2, position_] = 0\n+ slot_weight_matrix[position_, position_] = 1\nreturn slot_weight_matrix\ndef get_marginal_pscore(\n@@ -323,49 +339,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nsampled reward: array-like, shape (n_actions, len_list)\n\"\"\"\n- if self.reward_structure in [\"Cascade\", \"Greedy\"]:\n- reward = np.zeros(expected_reward_factual.shape)\n- for i in tqdm(\n- np.arange(reward.shape[0]),\n- desc=\"[sample_reward]\",\n- total=reward.shape[0],\n- ):\n- previous_reward = 0.0\n- # actions are in order of browsing assumption\n- action_list = (\n- np.arange(3)\n- if self.reward_structure == \"Cascade\"\n- else np.argsort(expected_reward_factual[i])[::-1]\n- )\n+ if self.click_model is None:\nif self.reward_type == \"binary\":\n- for position_ in action_list:\n- reward[i, position_] = self.random_.binomial(\n- n=1,\n- p=expected_reward_factual[i, position_]\n- * self.reward_transition_rate[int(previous_reward)],\n- )\n- previous_reward = reward[i, position_]\n- else:\n- for position_ in action_list:\n- mean = (\n- expected_reward_factual[i, position_]\n- * self.reward_transition_rate[int(previous_reward >= 0)]\n- )\n- a = (self.reward_min - mean) / self.reward_std\n- b = (self.reward_max - mean) / self.reward_std\n- reward[i, position_] = truncnorm.rvs(\n- a=a,\n- b=b,\n- loc=mean,\n- scale=self.reward_std,\n- random_state=self.random_state,\n- )\n- previous_reward = reward[i, position_]\n-\n- elif self.reward_type == \"binary\":\nreward = np.array(\n[\n- self.random_.binomial(n=1, p=expected_reward_factual[:, position_])\n+ self.random_.binomial(\n+ n=1, p=expected_reward_factual[:, position_]\n+ )\nfor position_ in range(self.len_list)\n]\n).T\n@@ -384,6 +364,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nelse:\nraise NotImplementedError\n+ else:\n+ raise NotImplementedError\n# return: array-like, shape (n_rounds, len_list)\nreturn reward\n@@ -447,7 +429,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn_pscore_marginal=return_pscore_marginal,\n)\n# sample expected reward 
factual\n- if self.reward_function is None:\n+ if self.base_reward_function is None:\nexpected_reward = self.sample_contextfree_expected_reward()\nexpected_reward_tile = np.tile(expected_reward, (n_rounds, 1, 1))\n# action_2d: array-like, shape (n_rounds, len_list)\n@@ -467,8 +449,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_context=self.action_context,\naction=action,\nslot_weight_matrix=self.slot_weight_matrix,\n- exam_weight=self.exam_weight,\nbase_function=self.base_reward_function,\n+ is_cascade=self.is_cascade,\n+ reward_type=self.reward_type,\n+ len_list=self.len_list,\n+ action_effect_matrix=self.action_effect_matrix,\nrandom_state=self.random_state,\n)\n# check the shape of expected_reward_factual\n@@ -498,33 +483,132 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n-def weighted_reward_function(\n+def generate_synmetric_matrix(random_state: int, n_actions: int):\n+ random_ = check_random_state(random_state)\n+ base_matrix = random_.normal(size=(n_actions, n_actions))\n+ return (\n+ np.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())\n+ )\n+\n+\n+def action_effect_additive_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ action: np.ndarray,\n+ base_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n+ action_effect_matrix: np.ndarray,\n+ is_cascade: bool,\n+ len_list: int,\n+ reward_type: str,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+) -> np.ndarray:\n+ \"\"\"TODO: comment\"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action, np.ndarray) or action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional ndarray\")\n+\n+ if len_list * context.shape[0] != action.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of context muptiplied by len_list must be the same as that of action\"\n+ )\n+\n+ if action_effect_matrix.shape != (\n+ action_context.shape[0],\n+ action_context.shape[0],\n+ ):\n+ raise ValueError(\n+ f\"the shape of action effect matrix must be (action_context.shape[0], action_context.shape[0]), but {action_effect_matrix.shape}\"\n+ )\n+\n+ if reward_type not in [\n+ \"binary\",\n+ \"continuous\",\n+ ]:\n+ raise ValueError(\n+ f\"reward_type must be either 'binary' or 'continuous', but {reward_type} is given.'\"\n+ )\n+\n+ # action_2d: array-like, shape (n_rounds, len_list)\n+ action_2d = action.reshape((context.shape[0], len_list))\n+ # expected_reward: array-like, shape (n_rounds, n_actions)\n+ expected_reward = base_function(\n+ context=context, action_context=action_context, random_state=random_state\n+ )\n+ if reward_type == \"binary\":\n+ expected_reward = np.log(expected_reward / (1 - expected_reward))\n+ expected_reward_factual = np.zeros_like(action_2d)\n+ for position_ in range(len_list):\n+ tmp_fixed_reward = expected_reward[\n+ np.arange(context.shape[0]), action_2d[:, position_]\n+ ]\n+ for position2_ in range(len_list):\n+ if is_cascade:\n+ if position_ >= position2_:\n+ break\n+ elif position_ == position2_:\n+ continue\n+ tmp_fixed_reward += action_effect_matrix[\n+ action_2d[:, position_], action_2d[:, position2_]\n+ ]\n+ expected_reward_factual[:, position_] = tmp_fixed_reward\n+ if reward_type == \"binary\":\n+ expected_reward_factual = sigmoid(expected_reward_factual)\n+ assert 
expected_reward_factual.shape == (\n+ context.shape[0],\n+ len_list,\n+ ), f\"response shape must be (n_rounds, len_list), but {expected_reward_factual.shape}\"\n+ return expected_reward_factual\n+\n+\n+def slot_weighted_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\n- slot_weight_matrix: np.ndarray,\n- exam_weight: np.ndarray,\nbase_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n+ slot_weight_matrix: np.ndarray,\n+ reward_type: str,\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"TODO: comment\nslot_weight_matrix: array-like, shape (len_list, len_list)\n\"\"\"\n- # fix slot_weight_matrix by exam_weight\n- slot_weight_matrix = slot_weight_matrix * exam_weight\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action_context, np.ndarray) or action_context.ndim != 2:\n+ raise ValueError(\"action_context must be 2-dimensional ndarray\")\n+\n+ if not isinstance(action, np.ndarray) or action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional ndarray\")\n+\n+ if reward_type not in [\n+ \"binary\",\n+ \"continuous\",\n+ ]:\n+ raise ValueError(\n+ f\"reward_type must be either 'binary' or 'continuous', but {reward_type} is given.'\"\n+ )\n+ if slot_weight_matrix.shape[0] * context.shape[0] != action.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of slot_weight_matrix muptiplied by that of context must be the same as that of action\"\n+ )\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((context.shape[0], slot_weight_matrix.shape[0]))\n# action_3d: array-like, shape (n_rounds, n_actions, len_list)\naction_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n- if slot_weight_matrix.shape[0] < action_3d.shape[2]:\n- raise ValueError(\n- \"the size of axis 0 of slot_weight_matrix must be the same as the size of axis 1 of action_3d\"\n- )\n# expected_reward: array-like, shape (n_rounds, n_actions)\nexpected_reward = base_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\n+ if reward_type == \"binary\":\n+ expected_reward = np.log(expected_reward / (1 - expected_reward))\n# expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\nexpected_reward_3d = np.tile(\nexpected_reward, (slot_weight_matrix.shape[0], 1, 1)\n@@ -534,11 +618,17 @@ def weighted_reward_function(\n# weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\nweighted_expected_reward = action_weight * expected_reward_3d\n# expected_reward_factual: list, shape (n_rounds, len_list)\n- expected_reward_factual = (\n- weighted_expected_reward.sum(axis=1) / slot_weight_matrix.shape[0]\n- )\n+ expected_reward_factual = weighted_expected_reward.sum(axis=1)\n+ if reward_type == \"binary\":\n+ expected_reward_factual = sigmoid(expected_reward_factual)\n+ # q_l = \\sum_{a} a3d[i, a, l] q_a + \\sum_{a_1, a_2} delta(a_1, a_2)\n# return: array, shape (n_rounds, len_list)\n- return np.array(expected_reward_factual)\n+ result = np.array(expected_reward_factual)\n+ assert result.shape == (\n+ context.shape[0],\n+ slot_weight_matrix.shape[0],\n+ ), f\"response shape must be (n_rounds, len_list), but {result.shape}\"\n+ return result\ndef linear_behavior_policy_logit(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -4,14 +4,16 @@ import pytest\nimport numpy as np\nimport pandas as pd\n-from obp.dataset import SyntheticSlateBanditDataset\n+from obp.dataset.synthetic import (\n+ linear_reward_function,\n+ logistic_reward_function,\n+)\nfrom obp.dataset.synthetic_slate import (\n- weighted_reward_function,\nlinear_behavior_policy_logit,\n+ SyntheticSlateBanditDataset,\n)\nfrom obp.types import BanditFeedback\n-\n# n_actions, len_list, dim_context, reward_type, random_state, description\ninvalid_input_of_init = [\n(\"4\", 3, 2, \"binary\", 1, \"n_actions must be an integer larger than 1\"),\n@@ -234,6 +236,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\n), f\"pscore marginal must be None, but {bandit_feedback['pscore_marginal']}\"\n+# TODO: fix\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_sips_logistic_reward():\n# set parameters\nn_actions = 10\n@@ -242,17 +245,16 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\n- reward_structure = \"SIPS\"\n+ reward_structure = \"standard_additive\"\ndataset = SyntheticSlateBanditDataset(\nn_actions=n_actions,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\nreward_structure=reward_structure,\n- exam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=weighted_reward_function,\n+ base_reward_function=logistic_reward_function,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n@@ -279,17 +281,16 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\n- reward_structure = \"RIPS\"\n+ reward_structure = \"cascade_additive\"\ndataset = SyntheticSlateBanditDataset(\nn_actions=n_actions,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\nreward_structure=reward_structure,\n- exam_weight=1 / np.exp(np.arange(len_list)),\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=weighted_reward_function,\n+ base_reward_function=logistic_reward_function,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n@@ -321,11 +322,10 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n- reward_structure=\"RIPS\",\n- exam_weight=1 / np.exp(np.arange(len_list)),\n+ reward_structure=\"cascade_additive\",\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=weighted_reward_function,\n+ base_reward_function=logistic_reward_function,\n)\n# get feedback\nbandit_feedback_r = dataset_r.obtain_batch_bandit_feedback(\n@@ -354,11 +354,10 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n- reward_structure=\"SIPS\",\n- exam_weight=1 / np.exp(np.arange(len_list)),\n+ reward_structure=\"standard_additive\",\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=weighted_reward_function,\n+ base_reward_function=logistic_reward_function,\n)\n# get feedback\nbandit_feedback_s = dataset_s.obtain_batch_bandit_feedback(\n@@ -387,11 +386,10 @@ def 
test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n- reward_structure=\"IIPS\",\n- exam_weight=1 / np.exp(np.arange(len_list)),\n+ reward_structure=\"independent\",\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- reward_function=weighted_reward_function,\n+ base_reward_function=logistic_reward_function,\n)\n# get feedback\nbandit_feedback_i = dataset_i.obtain_batch_bandit_feedback(\n@@ -420,7 +418,7 @@ def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_\n# n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_marginal, description\n-valid_input_ = [\n+valid_input_of_obtain_batch_bandit_feedback = [\n(\n10,\n3,\n@@ -428,12 +426,12 @@ valid_input_ = [\n\"binary\",\n123,\n1000,\n- \"SIPS\",\n+ \"standard_additive\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ logistic_reward_function,\nFalse,\n- \"SIPS\",\n+ \"standard_additive\",\n),\n(\n10,\n@@ -442,12 +440,12 @@ valid_input_ = [\n\"binary\",\n123,\n1000,\n- \"IIPS\",\n+ \"independent\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ logistic_reward_function,\nFalse,\n- \"IIPS\",\n+ \"independent\",\n),\n(\n10,\n@@ -456,12 +454,12 @@ valid_input_ = [\n\"binary\",\n123,\n1000,\n- \"RIPS\",\n+ \"cascade_additive\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ logistic_reward_function,\nFalse,\n- \"RIPS\",\n+ \"cascade_additive\",\n),\n(\n10,\n@@ -470,12 +468,12 @@ valid_input_ = [\n\"continuous\",\n123,\n1000,\n- \"SIPS\",\n+ \"standard_additive\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ linear_reward_function,\nFalse,\n- \"SIPS continuous\",\n+ \"standard_additive continuous\",\n),\n(\n10,\n@@ -484,12 +482,12 @@ valid_input_ = [\n\"continuous\",\n123,\n1000,\n- \"IIPS\",\n+ \"independent\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ linear_reward_function,\nFalse,\n- \"IIPS continuous\",\n+ \"independent continuous\",\n),\n(\n10,\n@@ -498,12 +496,12 @@ valid_input_ = [\n\"continuous\",\n123,\n1000,\n- \"RIPS\",\n+ \"cascade_additive\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ linear_reward_function,\nFalse,\n- \"RIPS continuous\",\n+ \"cascade_additive continuous\",\n),\n(\n10,\n@@ -512,12 +510,12 @@ valid_input_ = [\n\"continuous\",\n123,\n1000,\n- \"RIPS\",\n+ \"cascade_additive\",\n1 / np.exp(np.arange(3)),\nNone,\nNone,\nFalse,\n- \"Random policy and reward function (continous reward)\",\n+ \"Random policy and reward function (continuous reward)\",\n),\n(\n10,\n@@ -526,12 +524,12 @@ valid_input_ = [\n\"binary\",\n123,\n1000,\n- \"Cascade\",\n+ \"cascade_exponential\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ logistic_reward_function,\nFalse,\n- \"Cascade (binary reward)\",\n+ \"cascade_exponential (binary reward)\",\n),\n(\n10,\n@@ -540,12 +538,12 @@ valid_input_ = [\n\"continuous\",\n123,\n1000,\n- \"Cascade\",\n+ \"cascade_exponential\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ linear_reward_function,\nFalse,\n- \"Cascade (continous reward)\",\n+ \"cascade_exponential (continuous 
reward)\",\n),\n(\n10,\n@@ -554,12 +552,12 @@ valid_input_ = [\n\"binary\",\n123,\n1000,\n- \"Greedy\",\n+ \"standard_exponential\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ logistic_reward_function,\nFalse,\n- \"Greedy (binary reward)\",\n+ \"standard_exponential (binary reward)\",\n),\n(\n10,\n@@ -568,19 +566,19 @@ valid_input_ = [\n\"continuous\",\n123,\n1000,\n- \"Greedy\",\n+ \"standard_exponential\",\n1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\n- weighted_reward_function,\n+ linear_reward_function,\nFalse,\n- \"Greedy (continous reward)\",\n+ \"standard_exponential (continuous reward)\",\n),\n]\[email protected](\n\"n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_marginal, description\",\n- valid_input_,\n+ valid_input_of_obtain_batch_bandit_feedback,\n)\ndef test_synthetic_slate_using_valid_inputs(\nn_actions,\n@@ -605,7 +603,7 @@ def test_synthetic_slate_using_valid_inputs(\nexam_weight=exam_weight,\nrandom_state=random_state,\nbehavior_policy_function=behavior_policy_function,\n- reward_function=reward_function,\n+ base_reward_function=reward_function,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/dataset/test_synthetic_slate_functions.py",
"diff": "+import numpy as np\n+import pytest\n+\n+from obp.dataset.synthetic import (\n+ linear_reward_function,\n+ logistic_reward_function,\n+)\n+from obp.dataset.synthetic_slate import (\n+ linear_behavior_policy_logit,\n+ slot_weighted_reward_function,\n+ action_effect_additive_reward_function,\n+ generate_synmetric_matrix,\n+)\n+\n+\n+def test_generate_synmetric_matrix():\n+ matrix = generate_synmetric_matrix(1, 3)\n+ assert matrix.shape == (3, 3)\n+ assert np.allclose(matrix, matrix.T)\n+\n+\n+# context, action_context, tau, err, description\n+invalid_input_of_linear_behavior_policy_logit = [\n+ (\n+ np.array([1.0, 1.0]),\n+ np.ones([2, 2]),\n+ None,\n+ ValueError,\n+ \"context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ [1.0, 1.0],\n+ np.ones([2, 2]),\n+ None,\n+ ValueError,\n+ \"context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([2, 2]),\n+ np.array([1.0, 1.0]),\n+ None,\n+ ValueError,\n+ \"action_context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([2, 2]),\n+ [1.0, 1.0],\n+ None,\n+ ValueError,\n+ \"action_context must be 2-dimensional ndarray\",\n+ ),\n+ (np.ones([2, 2]), np.ones([2, 2]), np.array([1]), TypeError, \"\"),\n+ (np.ones([2, 2]), np.ones([2, 2]), -1, ValueError, \"\"),\n+]\n+\n+\[email protected](\n+ \"context, action_context, tau, err, description\",\n+ invalid_input_of_linear_behavior_policy_logit,\n+)\n+def test_linear_behavior_policy_logit_using_invalid_input(\n+ context, action_context, tau, err, description\n+):\n+ if description == \"\":\n+ with pytest.raises(err):\n+ linear_behavior_policy_logit(\n+ context=context, action_context=action_context, tau=tau\n+ )\n+ else:\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ linear_behavior_policy_logit(\n+ context=context, action_context=action_context, tau=tau\n+ )\n+\n+\n+# context, action_context, tau, description\n+valid_input_of_linear_behavior_policy_logit = [\n+ (np.ones([2, 2]), np.ones([3, 2]), 1, \"valid input\"),\n+]\n+\n+\[email protected](\n+ \"context, action_context, tau, description\",\n+ valid_input_of_linear_behavior_policy_logit,\n+)\n+def test_linear_behavior_policy_logit_using_valid_input(\n+ context, action_context, tau, description\n+):\n+ logit_value = linear_behavior_policy_logit(\n+ context=context, action_context=action_context, tau=tau\n+ )\n+ assert logit_value.shape == (context.shape[0], action_context.shape[0])\n+\n+\n+# context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, err, description\n+invalid_input_of_slot_weighted_reward_function = [\n+ (\n+ np.array([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ np.identity(3),\n+ \"binary\",\n+ 1,\n+ ValueError,\n+ \"context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.array([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ np.identity(3),\n+ \"binary\",\n+ 1,\n+ ValueError,\n+ \"action_context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.ones([5, 2]),\n+ logistic_reward_function,\n+ np.identity(3),\n+ \"binary\",\n+ 1,\n+ ValueError,\n+ \"action must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.random.choice(5),\n+ logistic_reward_function,\n+ np.identity(3),\n+ \"binary\",\n+ 1,\n+ ValueError,\n+ \"action must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.ones(14),\n+ logistic_reward_function,\n+ np.identity(3),\n+ \"binary\",\n+ 1,\n+ ValueError,\n+ \"the 
size of axis 0\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, err, description\",\n+ invalid_input_of_slot_weighted_reward_function,\n+)\n+def test_slot_weighted_reward_function_using_invalid_input(\n+ context,\n+ action_context,\n+ action,\n+ base_function,\n+ slot_weight_matrix,\n+ reward_type,\n+ random_state,\n+ err,\n+ description,\n+):\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = slot_weighted_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ action=action,\n+ slot_weight_matrix=slot_weight_matrix,\n+ base_function=base_function,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ )\n+\n+\n+# context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, description\n+valid_input_of_slot_weighted_reward_function = [\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ np.identity(3),\n+ \"binary\",\n+ 1,\n+ \"binary reward\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ linear_reward_function,\n+ np.identity(3),\n+ \"continuous\",\n+ 1,\n+ \"continuous reward\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, description\",\n+ valid_input_of_slot_weighted_reward_function,\n+)\n+def test_slot_weighted_reward_function_using_valid_input(\n+ context,\n+ action_context,\n+ action,\n+ base_function,\n+ slot_weight_matrix,\n+ reward_type,\n+ random_state,\n+ description,\n+):\n+ expected_reward_factual = slot_weighted_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ action=action,\n+ slot_weight_matrix=slot_weight_matrix,\n+ base_function=base_function,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ )\n+ assert expected_reward_factual.shape == (\n+ context.shape[0],\n+ slot_weight_matrix.shape[0],\n+ )\n+ if reward_type == \"binary\":\n+ assert np.all(0 <= expected_reward_factual) and np.all(\n+ expected_reward_factual <= 1\n+ )\n+\n+\n+# context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\n+invalid_input_of_action_effect_reward_function = [\n+ (\n+ np.array([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ ValueError,\n+ \"context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.array([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ ValueError,\n+ \"action_context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.ones([5, 2]),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ ValueError,\n+ \"action must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.random.choice(5),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ ValueError,\n+ \"action must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.ones(10),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ ValueError,\n+ \"the size of axis 0\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ 
np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 3),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ ValueError,\n+ \"the shape of action effect matrix must be\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\",\n+ invalid_input_of_action_effect_reward_function,\n+)\n+def test_action_effect_reward_function_using_invalid_input(\n+ context,\n+ action_context,\n+ action,\n+ base_function,\n+ action_effect_matrix,\n+ reward_type,\n+ is_cascade,\n+ len_list,\n+ random_state,\n+ err,\n+ description,\n+):\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = action_effect_additive_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ action=action,\n+ action_effect_matrix=action_effect_matrix,\n+ base_function=base_function,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ len_list=len_list,\n+ is_cascade=is_cascade,\n+ )\n+\n+\n+# context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\n+valid_input_of_action_effect_reward_function = [\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ True,\n+ 3,\n+ 1,\n+ \"binary reward, cascade\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ linear_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"continuous\",\n+ True,\n+ 3,\n+ 1,\n+ \"continuous reward, cascade\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ logistic_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"binary\",\n+ False,\n+ 3,\n+ 1,\n+ \"binary reward, non_cascade\",\n+ ),\n+ (\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n+ linear_reward_function,\n+ generate_synmetric_matrix(1, 4),\n+ \"continuous\",\n+ False,\n+ 3,\n+ 1,\n+ \"continuous reward, non_cascade\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, description\",\n+ valid_input_of_action_effect_reward_function,\n+)\n+def test_action_effect_reward_function_using_valid_input(\n+ context,\n+ action_context,\n+ action,\n+ base_function,\n+ action_effect_matrix,\n+ reward_type,\n+ is_cascade,\n+ len_list,\n+ random_state,\n+ description,\n+):\n+ expected_reward_factual = action_effect_additive_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ action=action,\n+ action_effect_matrix=action_effect_matrix,\n+ base_function=base_function,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ len_list=len_list,\n+ is_cascade=is_cascade,\n+ )\n+ assert expected_reward_factual.shape == (\n+ context.shape[0],\n+ len_list,\n+ )\n+ if reward_type == \"binary\":\n+ assert np.all(0 <= expected_reward_factual) and np.all(\n+ expected_reward_factual <= 1\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix reward structures; add test of slate dataset |
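The record above introduces `generate_synmetric_matrix` and the additive slate reward functions together with their tests. A minimal standalone sketch of how such a symmetric action-interaction matrix can be built and checked with plain NumPy (not library code; the function name here is illustrative, only the construction mirrors the diff):

```python
import numpy as np

def symmetric_interaction_matrix(n_actions: int, random_state: int) -> np.ndarray:
    """Draw a random symmetric matrix W with W[i, j] == W[j, i]."""
    rng = np.random.RandomState(random_state)
    base = rng.normal(size=(n_actions, n_actions))
    lower = np.tril(base)
    # mirror the lower triangle and keep the original diagonal only once
    return lower + lower.T - np.diag(base.diagonal())

W = symmetric_interaction_matrix(n_actions=4, random_state=1)
assert W.shape == (4, 4)
assert np.allclose(W, W.T)
```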
641,006 | 03.04.2021 01:35:50 | -32,400 | e48e5def9d261b7935127d562eee4f025f73d074 | add option of calculating exact pscore marginal of random policy | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -276,6 +276,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_: np.ndarray,\nn_rounds: int,\nreturn_pscore_marginal: bool = True,\n+ return_exact_uniform_pscore_marginal: bool = False,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n\"\"\"TODO: comment\"\"\"\naction = np.zeros(n_rounds * self.len_list, dtype=int)\n@@ -303,16 +304,24 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_set = np.delete(action_set, action_set == action_sampled)\n# calculate marginal pscore\nif return_pscore_marginal:\n+ if return_exact_uniform_pscore_marginal:\n+ pscore_marginal[i * self.len_list + position_] = (\n+ self.len_list / self.n_actions\n+ )\n+ else:\npscore_marginal_i_l = 0.0\nfor perm in permutations(range(self.n_actions), self.len_list):\nif sampled_action_index not in perm:\ncontinue\npscore_marginal_i_l += self.get_marginal_pscore(\nperm=perm,\n- behavior_policy_logit_i_=behavior_policy_logit_[i : i + 1],\n+ behavior_policy_logit_i_=behavior_policy_logit_[\n+ i : i + 1\n+ ],\n)\n- pscore_marginal[i * self.len_list + position_] = pscore_marginal_i_l\n-\n+ pscore_marginal[\n+ i * self.len_list + position_\n+ ] = pscore_marginal_i_l\n# calculate joint pscore all\nstart_idx = i * self.len_list\nend_idx = start_idx + self.len_list\n@@ -374,6 +383,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds: int,\ntau: Union[int, float] = 1.0,\nreturn_pscore_marginal: bool = True,\n+ return_exact_uniform_pscore_marginal: bool = False,\n) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -390,6 +400,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_marginal` is returned or not.\nWhen `n_actions` and `len_list` are large, this parameter should be set to False because of the computational time\n+ return_exact_uniform_pscore_marginal: bool, default=False\n+ A boolean parameter whether `pscore_marginal` of uniform random policy is returned or not.\n+ When using uniform random policy, this parameter should be set to True\n+\n+\nReturns\n---------\nbandit_feedback: BanditFeedback\n@@ -400,6 +415,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"n_rounds must be a positive integer, but {n_rounds} is given\"\n)\n+ if (\n+ return_exact_uniform_pscore_marginal\n+ and self.behavior_policy_function is not None\n+ ):\n+ raise ValueError(\n+ \"return_exact_uniform_pscore_marginal must not be True when behavior_policy_function is not None\"\n+ )\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\n@@ -427,6 +449,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_=behavior_policy_logit_,\nn_rounds=n_rounds,\nreturn_pscore_marginal=return_pscore_marginal,\n+ return_exact_uniform_pscore_marginal=return_exact_uniform_pscore_marginal,\n)\n# sample expected reward factual\nif self.base_reward_function is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -177,67 +177,35 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\n), f\"pscore_joint_all must be {pscore_above} for all impressions\"\n-def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy():\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy_largescale():\n# set parameters\n- n_actions = 10\n- len_list = 3\n+ n_actions = 100\n+ len_list = 10\ndim_context = 2\nreward_type = \"binary\"\nrandom_state = 12345\n- n_rounds = 100\n- dataset = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n- len_list=len_list,\n- dim_context=dim_context,\n- reward_type=reward_type,\n- random_state=random_state,\n- behavior_policy_function=linear_behavior_policy_logit,\n- )\n- # get feedback\n- bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- # check slate bandit feedback (common test)\n- check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n- # print reward\n- pscore_columns = [\n- \"pscore_joint_above\",\n- \"pscore_joint_all\",\n- \"pscore_marginal\",\n- ]\n- bandit_feedback_df = pd.DataFrame()\n- for column in [\"impression_id\", \"position\", \"action\", \"reward\"] + pscore_columns:\n- bandit_feedback_df[column] = bandit_feedback[column]\n- print(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n-\n-\n-def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_marginal():\n- # set parameters\n- n_actions = 80\n- len_list = 3\n- dim_context = 2\n- reward_type = \"binary\"\n- random_state = 12345\n- n_rounds = 100\n+ n_rounds = 10000\ndataset = SyntheticSlateBanditDataset(\nn_actions=n_actions,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\nrandom_state=random_state,\n- behavior_policy_function=linear_behavior_policy_logit,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds, return_pscore_marginal=False\n+ n_rounds=n_rounds, return_exact_uniform_pscore_marginal=True\n)\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n- assert (\n- bandit_feedback[\"pscore_marginal\"] is None\n- ), f\"pscore marginal must be None, but {bandit_feedback['pscore_marginal']}\"\n+ # check pscore marginal\n+ pscore_marginal = float(len_list / n_actions)\n+ assert np.allclose(\n+ np.unique(bandit_feedback[\"pscore_marginal\"]), [pscore_marginal]\n+ ), f\"pscore_marginal must be [{pscore_marginal}], but {np.unique(bandit_feedback['pscore_marginal'])}\"\n-# TODO: fix\n-def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_sips_logistic_reward():\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy():\n# set parameters\nn_actions = 10\nlen_list = 3\n@@ -245,23 +213,19 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\n- reward_structure = \"standard_additive\"\ndataset = SyntheticSlateBanditDataset(\nn_actions=n_actions,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n- reward_structure=reward_structure,\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- base_reward_function=logistic_reward_function,\n)\n# get feedback\n- bandit_feedback = dataset.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds, return_pscore_marginal=False\n- )\n+ bandit_feedback = 
dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ # print reward\npscore_columns = [\n\"pscore_joint_above\",\n\"pscore_joint_all\",\n@@ -273,24 +237,21 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nprint(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n-def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_rips_logistic_reward():\n+def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_marginal():\n# set parameters\n- n_actions = 10\n+ n_actions = 80\nlen_list = 3\ndim_context = 2\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\n- reward_structure = \"cascade_additive\"\ndataset = SyntheticSlateBanditDataset(\nn_actions=n_actions,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n- reward_structure=reward_structure,\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- base_reward_function=logistic_reward_function,\n)\n# get feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\n@@ -298,123 +259,30 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\n)\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n- pscore_columns = [\n- \"pscore_joint_above\",\n- \"pscore_joint_all\",\n- \"pscore_marginal\",\n- ]\n- bandit_feedback_df = pd.DataFrame()\n- for column in [\"impression_id\", \"position\", \"action\", \"reward\"] + pscore_columns:\n- bandit_feedback_df[column] = bandit_feedback[column]\n- print(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n-\n+ assert (\n+ bandit_feedback[\"pscore_marginal\"] is None\n+ ), f\"pscore marginal must be None, but {bandit_feedback['pscore_marginal']}\"\n-def test_tmp_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_and_rips_logistic_reward():\n- # set parameters\n- n_actions = 10\n- len_list = 3\n- dim_context = 2\n- reward_type = \"binary\"\n- random_state = 123\n- n_rounds = 10000\n- dataset_r = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n- len_list=len_list,\n- dim_context=dim_context,\n- reward_type=reward_type,\n- reward_structure=\"cascade_additive\",\n- random_state=random_state,\n- behavior_policy_function=linear_behavior_policy_logit,\n- base_reward_function=logistic_reward_function,\n- )\n- # get feedback\n- bandit_feedback_r = dataset_r.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds, return_pscore_marginal=False\n- )\n- # check slate bandit feedback (common test)\n- check_slate_bandit_feedback(bandit_feedback=bandit_feedback_r)\n- pscore_columns = [\n- \"pscore_joint_above\",\n- \"pscore_joint_all\",\n- \"pscore_marginal\",\n- ]\n- bandit_feedback_df_r = pd.DataFrame()\n- for column in [\n- \"impression_id\",\n- \"position\",\n- \"action\",\n- \"reward\",\n- \"expected_reward_factual\",\n- ] + pscore_columns:\n- bandit_feedback_df_r[column] = bandit_feedback_r[column]\n- print(bandit_feedback_df_r.groupby(\"position\")[\"reward\"].describe())\n- # sips\n- dataset_s = SyntheticSlateBanditDataset(\n+ # random seed should be fixed\n+ dataset2 = SyntheticSlateBanditDataset(\nn_actions=n_actions,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n- 
reward_structure=\"standard_additive\",\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n- base_reward_function=logistic_reward_function,\n)\n# get feedback\n- bandit_feedback_s = dataset_s.obtain_batch_bandit_feedback(\n+ bandit_feedback2 = dataset2.obtain_batch_bandit_feedback(\nn_rounds=n_rounds, return_pscore_marginal=False\n)\n# check slate bandit feedback (common test)\n- check_slate_bandit_feedback(bandit_feedback=bandit_feedback_s)\n- pscore_columns = [\n- \"pscore_joint_above\",\n- \"pscore_joint_all\",\n- \"pscore_marginal\",\n- ]\n- bandit_feedback_df_s = pd.DataFrame()\n- for column in [\n- \"impression_id\",\n- \"position\",\n- \"action\",\n- \"reward\",\n- \"expected_reward_factual\",\n- ] + pscore_columns:\n- bandit_feedback_df_s[column] = bandit_feedback_s[column]\n- print(bandit_feedback_df_s.groupby(\"position\")[\"reward\"].describe())\n- # iips\n- dataset_i = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n- len_list=len_list,\n- dim_context=dim_context,\n- reward_type=reward_type,\n- reward_structure=\"independent\",\n- random_state=random_state,\n- behavior_policy_function=linear_behavior_policy_logit,\n- base_reward_function=logistic_reward_function,\n- )\n- # get feedback\n- bandit_feedback_i = dataset_i.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds, return_pscore_marginal=False\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback2)\n+ # check random seed effect\n+ assert np.allclose(\n+ bandit_feedback[\"expected_reward_factual\"],\n+ bandit_feedback2[\"expected_reward_factual\"],\n)\n- # check slate bandit feedback (common test)\n- check_slate_bandit_feedback(bandit_feedback=bandit_feedback_i)\n- pscore_columns = [\n- \"pscore_joint_above\",\n- \"pscore_joint_all\",\n- \"pscore_marginal\",\n- ]\n- bandit_feedback_df_i = pd.DataFrame()\n- for column in [\n- \"impression_id\",\n- \"position\",\n- \"action\",\n- \"reward\",\n- \"expected_reward_factual\",\n- ] + pscore_columns:\n- bandit_feedback_df_i[column] = bandit_feedback_i[column]\n- print(bandit_feedback_df_i.groupby(\"position\")[\"reward\"].describe())\n- # import pdb\n-\n- # pdb.set_trace()\n# n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_marginal, description\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add option of calculating exact pscore marginal of random policy |
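The commit above replaces the brute-force permutation sum with the closed form `len_list / n_actions` when the behavior policy is uniform random. A quick Monte-Carlo sanity check of that identity (a standalone sketch under the same sampling-without-replacement assumption; the constants are arbitrary):

```python
import numpy as np

# Under a uniform random policy that draws `len_list` distinct actions out of
# `n_actions`, each action lands somewhere in the slate with probability
# len_list / n_actions (by symmetry over actions).
n_actions, len_list, n_sim = 20, 3, 100_000
rng = np.random.RandomState(12345)
hits = sum(
    0 in rng.choice(n_actions, size=len_list, replace=False) for _ in range(n_sim)
)
print(hits / n_sim, len_list / n_actions)  # both should be close to 0.15
```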
641,006 | 03.04.2021 21:21:12 | -32,400 | a52d8602f98e46f58d56e795108c27f020961ae8 | add comment; add click models | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -51,11 +51,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nWhen 'cascade_additive' or 'standard_additive' is given, action_effect_matrix (:math:`w`) is generated.\nWhen 'cascade_exponential', 'standard_exponential', or 'independent' is given, slot_weight_matrix is generated.\nExpected reward is calculated as follows (:math:`f` is a base reward function of each item-position, and :math:`g` is a transform function):\n- 'cascade_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j < k} w_{a(k), a(j)})`.\n+ 'cascade_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j < k} W(a(k), a(j)))`.\n'cascade_exponential': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j < k} g^{-1}(f(x, a(j))) / \\\\exp(|k-j|))`.\n'independent': :math:`q_k(x, a) = f(x, a(k))`\n- 'standard_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j \\\\neq k} w_{a(k), a(j)})`.\n- 'standard_exponential': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j \\\\neq k} g^{-1}(f(x, a(j))) / \\\\exp(|k-j|))`\n+ 'standard_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j \\\\neq k} W(a(k), a(j)))`.\n+ 'standard_exponential': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j \\\\neq k} g^{-1}(f(x, a(j))) / \\\\exp(|k-j|))`.\nWhen reward_type is 'continous', transform function is the identity function.\nWhen reward_type is 'binray', transform function is the logit function.\n@@ -64,6 +64,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nWhen None is given, reward of each slot is sampled using the expected reward of the slot.\nWhen 'pbm' is given, reward of each slot is sampled using the position-based model.\nWhen 'cascade' is given, reward of each slot is sampled using the cascade model.\n+ When using some click model, 'continuous' reward type is unavailable.\nexam_weight: np.ndarray, default=None\nSlot-level examination probability.\n@@ -93,31 +94,36 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n>>> import numpy as np\n>>> from obp.dataset import (\n- SyntheticBanditDataset,\n- linear_reward_function,\n- linear_behavior_policy\n+ logistic_reward_function,\n+ linear_behavior_policy_logit,\n+ SyntheticSlateBanditDataset,\n)\n# generate synthetic contextual bandit feedback with 10 actions.\n- >>> dataset = SyntheticBanditDataset(\n+ >>> dataset = SyntheticSlateBanditDataset(\nn_actions=10,\ndim_context=5,\n- reward_function=logistic_reward_function,\n- behavior_policy=linear_behavior_policy,\n+ base_reward_function=logistic_reward_function,\n+ behavior_policy_function=linear_behavior_policy,\n+ reward_type='binary',\n+ reward_structure='cascade_additive',\n+ click_model='cascade',\n+ exam_weight=None,\nrandom_state=12345\n)\n- >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=100000)\n+ >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=5, return_pscore_item_position=True\n+ )\n>>> bandit_feedback\n{\n- 'n_rounds': 100000,\n+ 'n_rounds': 5,\n'n_actions': 10,\n+ 'slate_id': array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]),\n'context': array([[-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057],\n[ 1.39340583, 0.09290788, 0.28174615, 0.76902257, 1.24643474],\n[ 1.00718936, -1.29622111, 0.27499163, 0.22891288, 1.35291684],\n- ...,\n- [ 1.36946256, 0.58727761, -0.69296769, -0.27519988, -2.10289159],\n- [-0.27428715, 0.52635353, 1.02572168, -0.18486381, 0.72464834],\n- [-1.25579833, -1.42455203, -0.26361242, 0.27928604, 1.21015571]]),\n+ [ 0.88642934, -2.00163731, -0.37184254, 1.66902531, 
-0.43856974],\n+ [-0.53974145, 0.47698501, 3.24894392, -1.02122752, -0.5770873 ]]),\n'action_context': array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n@@ -128,24 +134,21 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]),\n- 'action': array([7, 4, 0, ..., 7, 9, 6]),\n- 'position': None,\n- 'reward': array([0, 1, 1, ..., 0, 1, 0]),\n- 'expected_reward': array([[0.80210203, 0.73828559, 0.83199558, ..., 0.81190503, 0.70617705,\n- 0.68985306],\n- [0.94119582, 0.93473317, 0.91345213, ..., 0.94140688, 0.93152449,\n- 0.90132868],\n- [0.87248862, 0.67974991, 0.66965669, ..., 0.79229752, 0.82712978,\n- 0.74923536],\n- ...,\n- [0.64856003, 0.38145901, 0.84476094, ..., 0.40962057, 0.77114661,\n- 0.65752798],\n- [0.73208527, 0.82012699, 0.78161352, ..., 0.72361416, 0.8652249 ,\n- 0.82571751],\n- [0.40348366, 0.24485417, 0.24037926, ..., 0.49613133, 0.30714854,\n- 0.5527749 ]]),\n- 'pscore': array([0.05423855, 0.10339675, 0.09756788, ..., 0.05423855, 0.07250876,\n- 0.14065505])\n+ 'action': array([8, 6, 5, 4, 7, 0, 1, 3, 5, 4, 6, 1, 4, 1, 7]),\n+ 'position': array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]),\n+ 'reward': array([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0]),\n+ 'expected_reward_factual': array([0.5 , 0.73105858, 0.5 , 0.88079708, 0.88079708,\n+ 0.88079708, 0.5 , 0.73105858, 0.5 , 0.5 ,\n+ 0.26894142, 0.5 , 0.73105858, 0.73105858, 0.5 ]),\n+ 'pscore_cascade': array([0.05982646, 0.00895036, 0.00127176, 0.10339675, 0.00625482,\n+ 0.00072447, 0.14110696, 0.01868618, 0.00284884, 0.10339675,\n+ 0.01622041, 0.00302774, 0.10339675, 0.01627253, 0.00116824]),\n+ 'pscore': array([0.00127176, 0.00127176, 0.00127176, 0.00072447, 0.00072447,\n+ 0.00072447, 0.00284884, 0.00284884, 0.00284884, 0.00302774,\n+ 0.00302774, 0.00302774, 0.00116824, 0.00116824, 0.00116824]),\n+ 'pscore_item_position': array([0.19068462, 0.40385939, 0.33855573, 0.31231088, 0.40385939,\n+ 0.2969341 , 0.40489767, 0.31220474, 0.3388982 , 0.31231088,\n+ 0.33855573, 0.40489767, 0.31231088, 0.40489767, 0.33855573])\n}\n\"\"\"\n@@ -206,27 +209,28 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.'\"\n)\n- if self.exam_weight is None:\n- self.exam_weight = np.ones(self.len_list)\n- else:\n- if not isinstance(self.exam_weight, np.ndarray):\n+ if self.click_model == \"pbm\" and not isinstance(self.exam_weight, np.ndarray):\nraise ValueError(\n- f\"exam_weight must be ndarray or None, but {self.exam_weight} is given\"\n+ f\"exam_weight must be ndarray when click model is 'pbm', but {self.exam_weight} is given\"\n)\n- # TODO: remove this line when we implement click models\n+ if self.click_model is not None and self.reward_type == \"continuous\":\n+ raise ValueError(\n+ \"continuous reward type is unavailable when click model is given\"\n+ )\n+ if self.click_model != \"pbm\":\nself.exam_weight = np.ones(self.len_list)\n- # TODO: fix reward structure names\nif self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n+ # generate action effect matrix\nself.action_effect_matrix = generate_synmetric_matrix(\nself.random_state, self.n_actions\n)\n+ # slot weight matrix is not used when reward structure is additive\nself.slot_weight_matrix = None\nif self.base_reward_function is not None:\nself.reward_function = 
action_effect_additive_reward_function\n- self.is_cascade = self.reward_structure == \"cascade_additive\"\nelse:\n+ # action effect matrix is not used when reward structure is not additive\nself.action_effect_matrix = None\n- self.is_cascade = None\nif self.base_reward_function is not None:\nself.reward_function = slot_weighted_reward_function\nif self.reward_structure == \"standard_exponential\":\n@@ -250,6 +254,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n@staticmethod\ndef obtain_standard_exponential_slot_weight(len_list):\n+ \"\"\"Obtain slot weight matrix for standard exponential reward structure (symmetric matrix)\"\"\"\nslot_weight_matrix = np.ones((len_list, len_list))\nfor position_ in np.arange(len_list):\nslot_weight_matrix[:, position_] = -1 / np.exp(\n@@ -260,6 +265,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n@staticmethod\ndef obtain_cascade_exponential_slot_weight(len_list):\n+ \"\"\"Obtain slot weight matrix for cascade exponential reward structure (upper triangular matrix)\"\"\"\nslot_weight_matrix = np.ones((len_list, len_list))\nfor position_ in np.arange(len_list):\nslot_weight_matrix[:, position_] = -1 / np.exp(\n@@ -272,11 +278,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn slot_weight_matrix\ndef calc_item_position_pscore(\n- self, perm: List[int], behavior_policy_logit_i_: np.ndarray\n+ self, action_list: List[int], behavior_policy_logit_i_: np.ndarray\n) -> float:\n+ \"\"\"Calculate pscore_item_position\"\"\"\nunique_action_set = np.arange(self.n_actions)\npscore_ = 1.0\n- for action in perm:\n+ for action in action_list:\nscore_ = softmax(behavior_policy_logit_i_[:, unique_action_set])[0]\naction_index = np.where(unique_action_set == action)[0][0]\npscore_ *= score_[action_index]\n@@ -292,7 +299,44 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn_pscore_item_position: bool = True,\nreturn_exact_uniform_pscore_item_position: bool = False,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n- \"\"\"TODO: comment\"\"\"\n+ \"\"\"Sample action and obtain pscores.\n+\n+ Parameters\n+ ------------\n+ behavior_policy_logit_: array-like, shape (n_rounds, n_actiions)\n+ Logit given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n+\n+ n_rounds: int\n+ Number of rounds for synthetic bandit feedback data.\n+\n+ return_pscore_item_position: bool, default=True\n+ A boolean parameter whether `pscore_item_position` is returned or not.\n+ When `n_actions` and `len_list` are large, this parameter should be set to False because of the computational time\n+\n+ return_exact_uniform_pscore_item_position: bool, default=False\n+ A boolean parameter whether `pscore_item_position` of uniform random policy is returned or not.\n+ When using uniform random policy, this parameter should be set to True\n+\n+\n+ Returns\n+ ----------\n+ action: array-like, shape (n_actions * len_list)\n+ Sampled action.\n+ Action list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n+\n+ pscore_cascade: array-like, shape (n_actions * len_list)\n+ Joint action choice probabilities above the slot (:math:`k`) in each slate given context (:math:`x`).\n+ i.e., :math:`\\\\pi_k: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{k})`.\n+\n+ pscore: array-like, shape (n_actions * len_list)\n+ Joint action choice probabilities of the slate given context (:math:`x`).\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow 
\\\\Delta(\\\\mathcal{A}^{\\\\text{len list}})`.\n+\n+ pscore_item_position: array-like, shape (n_actions * len_list)\n+ Marginal action choice probabilities of each slot given context (:math:`x`).\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+\n+ \"\"\"\naction = np.zeros(n_rounds * self.len_list, dtype=int)\npscore_cascade = np.zeros(n_rounds * self.len_list)\npscore = np.zeros(n_rounds * self.len_list)\n@@ -332,11 +376,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nelse:\npscore_item_position_i_l = 0.0\n- for perm in permutations(range(self.n_actions), self.len_list):\n- if sampled_action_index not in perm:\n+ for action_list in permutations(\n+ range(self.n_actions), self.len_list\n+ ):\n+ if sampled_action_index not in action_list:\ncontinue\npscore_item_position_i_l += self.calc_item_position_pscore(\n- perm=perm,\n+ action_list=action_list,\nbehavior_policy_logit_i_=behavior_policy_logit_[\ni : i + 1\n],\n@@ -357,26 +403,24 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndef sample_reward_given_expected_reward(\nself, expected_reward_factual: np.ndarray\n- ) -> Tuple[np.ndarray, np.ndarray]:\n+ ) -> np.ndarray:\n\"\"\"Sample reward for each action and slot based on expected_reward_factual\nParameters\n------------\nexpected_reward_factual: array-like, shape (n_rounds, len_list)\n- expected reward of factual actions\n+ Expected reward of factual actions given context.\nReturns\n----------\n- sampled reward: array-like, shape (n_actions, len_list)\n+ reward: array-like, shape (n_actions, len_list)\n\"\"\"\n- if self.click_model is None:\n+ expected_reward_factual = expected_reward_factual * self.exam_weight\nif self.reward_type == \"binary\":\nreward = np.array(\n[\n- self.random_.binomial(\n- n=1, p=expected_reward_factual[:, position_]\n- )\n+ self.random_.binomial(n=1, p=expected_reward_factual[:, position_])\nfor position_ in np.arange(self.len_list)\n]\n).T\n@@ -395,8 +439,14 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nelse:\nraise NotImplementedError\n- else:\n- raise NotImplementedError\n+ if self.click_model == \"cascade\":\n+ argmax_first_slot = np.argmax(reward, axis=1)\n+ for i, j in tqdm(\n+ enumerate(argmax_first_slot),\n+ desc=\"[sample_reward_of_cascade_model]\",\n+ total=reward.shape[0],\n+ ):\n+ reward[i, j + 1 :] = 0\n# return: array-like, shape (n_rounds, len_list)\nreturn reward\n@@ -494,8 +544,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_context=self.action_context,\naction=action,\nslot_weight_matrix=self.slot_weight_matrix,\n- base_function=self.base_reward_function,\n- is_cascade=self.is_cascade,\n+ base_reward_function=self.base_reward_function,\n+ is_cascade=\"cascade\" in self.reward_structure,\nreward_type=self.reward_type,\nlen_list=self.len_list,\naction_effect_matrix=self.action_effect_matrix,\n@@ -540,7 +590,7 @@ def action_effect_additive_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\n- base_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n+ base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\naction_effect_matrix: np.ndarray,\nis_cascade: bool,\nlen_list: int,\n@@ -548,7 +598,52 @@ def action_effect_additive_reward_function(\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"TODO: comment\"\"\"\n+ \"\"\"Slot-weighted reward function\n+ slot_weight_matrix: array-like, shape (len_list, len_list)\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape 
(n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation for each action.\n+\n+ action: array-like, shape (n_actions * len_list)\n+ Sampled action.\n+ Action list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n+\n+ base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\n+ Function generating expected reward for each given action-context pair,\n+ i.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n+ If None is set, context **independent** expected reward for each action will be\n+ sampled from the uniform distribution automatically.\n+\n+ reward_type: str, default='binary'\n+ Type of reward variable, which must be either 'binary' or 'continuous'.\n+ When 'binary' is given, expected reward is transformed by logit function.\n+\n+ action_effect_matrix (`W`): array-like, shape (n_actions, n_actions)\n+ `W(i, j)` is the interaction term between action `i` and `j`.\n+\n+ len_list: int (> 1)\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ is_cascade: bool\n+ Whether reward structure is cascade-type or not\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward_factual: array-like, shape (n_rounds, len_list)\n+ Sampled expected reward of factual actions\n+ When is_cascade is true, :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j < k} W(a(k), a(j)))`.\n+ When is_cascade is false, :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j \\\\neq k} W(a(k), a(j)))`.\n+\n+ \"\"\"\nif not isinstance(context, np.ndarray) or context.ndim != 2:\nraise ValueError(\"context must be 2-dimensional ndarray\")\n@@ -582,7 +677,7 @@ def action_effect_additive_reward_function(\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((context.shape[0], len_list))\n# expected_reward: array-like, shape (n_rounds, n_actions)\n- expected_reward = base_function(\n+ expected_reward = base_reward_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\nif reward_type == \"binary\":\n@@ -615,14 +710,48 @@ def slot_weighted_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\n- base_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n+ base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\nslot_weight_matrix: np.ndarray,\nreward_type: str,\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"TODO: comment\n+ \"\"\"Slot-weighted reward function\nslot_weight_matrix: array-like, shape (len_list, len_list)\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation for each action.\n+\n+ action: array-like, shape (n_actions * len_list)\n+ Sampled action.\n+ Action list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n+\n+ base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\n+ Function generating expected reward for each given action-context pair,\n+ i.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n+ If None is set, 
context **independent** expected reward for each action will be\n+ sampled from the uniform distribution automatically.\n+\n+ reward_type: str, default='binary'\n+ Type of reward variable, which must be either 'binary' or 'continuous'.\n+ When 'binary' is given, expected reward is transformed by logit function.\n+\n+ slot_weight_matrix (`W`): array-like, shape (len_list, len_list)\n+ `W(i, j)` is the weight of how the expected reward of slot `i` affects that of slot `j`.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward_factual: array-like, shape (n_rounds, len_list)\n+ Sampled expected reward of factual actions (:math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j \\\\neq k} g^{-1}(f(x, a(j))) * W(k, j)`)\n+\n\"\"\"\nif not isinstance(context, np.ndarray) or context.ndim != 2:\nraise ValueError(\"context must be 2-dimensional ndarray\")\n@@ -649,7 +778,7 @@ def slot_weighted_reward_function(\n# action_3d: array-like, shape (n_rounds, n_actions, len_list)\naction_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n# expected_reward: array-like, shape (n_rounds, n_actions)\n- expected_reward = base_function(\n+ expected_reward = base_reward_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\nif reward_type == \"binary\":\n@@ -702,7 +831,7 @@ def linear_behavior_policy_logit(\nReturns\n---------\nlogit value: array-like, shape (n_rounds, n_actions)\n- logit given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n+ Logit given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n\"\"\"\nif not isinstance(context, np.ndarray) or context.ndim != 2:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -4,14 +4,13 @@ import pytest\nimport numpy as np\nimport pandas as pd\n-from obp.dataset.synthetic import (\n+from obp.dataset import (\nlinear_reward_function,\nlogistic_reward_function,\n-)\n-from obp.dataset.synthetic_slate import (\nlinear_behavior_policy_logit,\nSyntheticSlateBanditDataset,\n)\n+\nfrom obp.types import BanditFeedback\n# n_actions, len_list, dim_context, reward_type, random_state, description\n@@ -227,6 +226,8 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nfor column in [\"slate_id\", \"position\", \"action\", \"reward\"] + pscore_columns:\nbandit_feedback_df[column] = bandit_feedback[column]\nprint(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n+ if reward_type == \"binary\":\n+ assert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_item_position():\n@@ -275,9 +276,11 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nbandit_feedback[\"expected_reward_factual\"],\nbandit_feedback2[\"expected_reward_factual\"],\n)\n+ if reward_type == \"binary\":\n+ assert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n-# n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\n+# n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\nvalid_input_of_obtain_batch_bandit_feedback = [\n(\n10,\n@@ -287,7 +290,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_additive\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -301,7 +305,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"independent\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -315,7 +320,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -329,7 +335,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_additive\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -343,7 +350,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"independent\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -357,7 +365,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -371,7 +380,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nNone,\nNone,\nFalse,\n@@ -385,7 +395,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_exponential\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -399,7 +410,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_exponential\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ 
None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -413,7 +425,8 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_exponential\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -427,17 +440,168 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_exponential\",\n- 1 / np.exp(np.arange(3)),\n+ None,\n+ None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n\"standard_exponential (continuous reward)\",\n),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"cascade_additive\",\n+ \"cascade\",\n+ None,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"cascade_additive, cascade click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"cascade_exponential\",\n+ \"cascade\",\n+ None,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"cascade_exponential, cascade click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"standard_additive\",\n+ \"cascade\",\n+ None,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"standard_additive, cascade click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"standard_exponential\",\n+ \"cascade\",\n+ None,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"standard_exponential, cascade click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"independent\",\n+ \"cascade\",\n+ None,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"independent, cascade click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"cascade_additive\",\n+ \"pbm\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"cascade_additive, pbm click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"cascade_exponential\",\n+ \"pbm\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"cascade_exponential, pbm click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"standard_additive\",\n+ \"pbm\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"standard_additive, pbm click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"standard_exponential\",\n+ \"pbm\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"standard_exponential, pbm click model (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"independent\",\n+ \"pbm\",\n+ 1 / np.exp(np.arange(3)),\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"independent, pbm click model (binary reward)\",\n+ ),\n]\[email protected](\n- \"n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\",\n+ \"n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, 
description\",\nvalid_input_of_obtain_batch_bandit_feedback,\n)\ndef test_synthetic_slate_using_valid_inputs(\n@@ -448,6 +612,7 @@ def test_synthetic_slate_using_valid_inputs(\nrandom_state,\nn_rounds,\nreward_structure,\n+ click_model,\nexam_weight,\nbehavior_policy_function,\nreward_function,\n@@ -460,6 +625,7 @@ def test_synthetic_slate_using_valid_inputs(\ndim_context=dim_context,\nreward_type=reward_type,\nreward_structure=reward_structure,\n+ click_model=click_model,\nexam_weight=exam_weight,\nrandom_state=random_state,\nbehavior_policy_function=behavior_policy_function,\n@@ -487,3 +653,5 @@ def test_synthetic_slate_using_valid_inputs(\nbandit_feedback_df[column] = bandit_feedback[column]\nprint(f\"-------{description}--------\")\nprint(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\n+ if reward_type == \"binary\":\n+ assert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate_functions.py",
"new_path": "tests/dataset/test_synthetic_slate_functions.py",
"diff": "@@ -92,7 +92,7 @@ def test_linear_behavior_policy_logit_using_valid_input(\nassert logit_value.shape == (context.shape[0], action_context.shape[0])\n-# context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, err, description\n+# context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, err, description\ninvalid_input_of_slot_weighted_reward_function = [\n(\nnp.array([5, 2]),\n@@ -153,14 +153,14 @@ invalid_input_of_slot_weighted_reward_function = [\[email protected](\n- \"context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, err, description\",\n+ \"context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, err, description\",\ninvalid_input_of_slot_weighted_reward_function,\n)\ndef test_slot_weighted_reward_function_using_invalid_input(\ncontext,\naction_context,\naction,\n- base_function,\n+ base_reward_function,\nslot_weight_matrix,\nreward_type,\nrandom_state,\n@@ -173,13 +173,13 @@ def test_slot_weighted_reward_function_using_invalid_input(\naction_context=action_context,\naction=action,\nslot_weight_matrix=slot_weight_matrix,\n- base_function=base_function,\n+ base_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\n)\n-# context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, description\n+# context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, description\nvalid_input_of_slot_weighted_reward_function = [\n(\nnp.ones([5, 2]),\n@@ -205,14 +205,14 @@ valid_input_of_slot_weighted_reward_function = [\[email protected](\n- \"context, action_context, action, base_function, slot_weight_matrix, reward_type, random_state, description\",\n+ \"context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, description\",\nvalid_input_of_slot_weighted_reward_function,\n)\ndef test_slot_weighted_reward_function_using_valid_input(\ncontext,\naction_context,\naction,\n- base_function,\n+ base_reward_function,\nslot_weight_matrix,\nreward_type,\nrandom_state,\n@@ -223,7 +223,7 @@ def test_slot_weighted_reward_function_using_valid_input(\naction_context=action_context,\naction=action,\nslot_weight_matrix=slot_weight_matrix,\n- base_function=base_function,\n+ base_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\n)\n@@ -237,7 +237,7 @@ def test_slot_weighted_reward_function_using_valid_input(\n)\n-# context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\n+# context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\ninvalid_input_of_action_effect_reward_function = [\n(\nnp.array([5, 2]),\n@@ -321,14 +321,14 @@ invalid_input_of_action_effect_reward_function = [\[email protected](\n- \"context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\",\n+ \"context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\",\ninvalid_input_of_action_effect_reward_function,\n)\ndef test_action_effect_reward_function_using_invalid_input(\ncontext,\naction_context,\naction,\n- base_function,\n+ 
base_reward_function,\naction_effect_matrix,\nreward_type,\nis_cascade,\n@@ -343,7 +343,7 @@ def test_action_effect_reward_function_using_invalid_input(\naction_context=action_context,\naction=action,\naction_effect_matrix=action_effect_matrix,\n- base_function=base_function,\n+ base_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\nlen_list=len_list,\n@@ -351,7 +351,7 @@ def test_action_effect_reward_function_using_invalid_input(\n)\n-# context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\n+# context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\nvalid_input_of_action_effect_reward_function = [\n(\nnp.ones([5, 2]),\n@@ -405,14 +405,14 @@ valid_input_of_action_effect_reward_function = [\[email protected](\n- \"context, action_context, action, base_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, description\",\n+ \"context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, description\",\nvalid_input_of_action_effect_reward_function,\n)\ndef test_action_effect_reward_function_using_valid_input(\ncontext,\naction_context,\naction,\n- base_function,\n+ base_reward_function,\naction_effect_matrix,\nreward_type,\nis_cascade,\n@@ -425,7 +425,7 @@ def test_action_effect_reward_function_using_valid_input(\naction_context=action_context,\naction=action,\naction_effect_matrix=action_effect_matrix,\n- base_function=base_function,\n+ base_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\nlen_list=len_list,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add comment; add click models |
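The new test rows in this commit exercise two click models, "cascade" and "pbm", with the pbm cases passing an examination weight of 1 / np.exp(np.arange(3)), and the dataset class multiplies the factual expected reward by that weight. As a rough illustration only (the helper name below is not the library's code), a position-based model simply discounts each slot's expected reward by its per-position examination probability:

import numpy as np

def apply_pbm_examination(expected_reward_factual, exam_weight):
    # expected_reward_factual: (n_rounds, len_list); exam_weight: (len_list,)
    # later slots are examined with lower probability, so their expected reward shrinks
    return expected_reward_factual * exam_weight

exam_weight = 1 / np.exp(np.arange(3))              # weight used in the pbm test cases above
rewards = np.full((4, 3), 0.8)                      # 4 rounds, len_list = 3
print(apply_pbm_examination(rewards, exam_weight))  # columns shrink as position grows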
641,006 | 04.04.2021 21:16:41 | -32,400 | 9a6edd92641f3f9b72fbacd1e2244b1316c5e672 | add comment and fix argument order (generate_symmetric_matrix) | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -221,8 +221,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself.exam_weight = np.ones(self.len_list)\nif self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n# generate action effect matrix\n- self.action_effect_matrix = generate_synmetric_matrix(\n- self.random_state, self.n_actions\n+ self.action_effect_matrix = generate_symmetric_matrix(\n+ n_actions=self.n_actions, random_state=self.random_state\n)\n# slot weight matrix is not used when reward structure is additive\nself.slot_weight_matrix = None\n@@ -255,7 +255,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n@staticmethod\ndef obtain_standard_exponential_slot_weight(len_list):\n\"\"\"Obtain slot weight matrix for standard exponential reward structure (symmetric matrix)\"\"\"\n- slot_weight_matrix = np.ones((len_list, len_list))\n+ slot_weight_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\nslot_weight_matrix[:, position_] = -1 / np.exp(\nnp.abs(np.arange(len_list) - position_)\n@@ -266,7 +266,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n@staticmethod\ndef obtain_cascade_exponential_slot_weight(len_list):\n\"\"\"Obtain slot weight matrix for cascade exponential reward structure (upper triangular matrix)\"\"\"\n- slot_weight_matrix = np.ones((len_list, len_list))\n+ slot_weight_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\nslot_weight_matrix[:, position_] = -1 / np.exp(\nnp.abs(np.arange(len_list) - position_)\n@@ -578,12 +578,28 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n-def generate_synmetric_matrix(random_state: int, n_actions: int):\n+def generate_symmetric_matrix(n_actions: int, random_state: int) -> np.ndarray:\n+ \"\"\"Generate symmetric matrix\n+\n+ Parameters\n+ -----------\n+\n+ n_actions: int (>= len_list)\n+ Number of actions.\n+\n+ random_state: int\n+ Controls the random seed in sampling elements of matrix.\n+\n+ Returns\n+ ---------\n+ symmetric_matrix: array-like, shape (n_actions, n_actions)\n+ \"\"\"\nrandom_ = check_random_state(random_state)\nbase_matrix = random_.normal(size=(n_actions, n_actions))\n- return (\n+ symmetric_matrix = (\nnp.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())\n)\n+ return symmetric_matrix\ndef action_effect_additive_reward_function(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate_functions.py",
"new_path": "tests/dataset/test_synthetic_slate_functions.py",
"diff": "@@ -9,12 +9,12 @@ from obp.dataset.synthetic_slate import (\nlinear_behavior_policy_logit,\nslot_weighted_reward_function,\naction_effect_additive_reward_function,\n- generate_synmetric_matrix,\n+ generate_symmetric_matrix,\n)\n-def test_generate_synmetric_matrix():\n- matrix = generate_synmetric_matrix(1, 3)\n+def test_generate_symmetric_matrix():\n+ matrix = generate_symmetric_matrix(n_actions=3, random_state=1)\nassert matrix.shape == (3, 3)\nassert np.allclose(matrix, matrix.T)\n@@ -244,7 +244,7 @@ invalid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -257,7 +257,7 @@ invalid_input_of_action_effect_reward_function = [\nnp.array([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -270,7 +270,7 @@ invalid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.ones([5, 2]),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -283,7 +283,7 @@ invalid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.random.choice(5),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -296,7 +296,7 @@ invalid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.ones(10),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -309,7 +309,7 @@ invalid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 3),\n+ generate_symmetric_matrix(n_actions=3, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -358,7 +358,7 @@ valid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -370,7 +370,7 @@ valid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlinear_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"continuous\",\nTrue,\n3,\n@@ -382,7 +382,7 @@ valid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"binary\",\nFalse,\n3,\n@@ -394,7 +394,7 @@ valid_input_of_action_effect_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlinear_reward_function,\n- generate_synmetric_matrix(1, 4),\n+ generate_symmetric_matrix(n_actions=4, random_state=1),\n\"continuous\",\nFalse,\n3,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add comment and fix argument order (generate_symmetic_matrix) |
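For reference, the renamed generate_symmetric_matrix helper builds its output by mirroring the lower triangle of a random normal matrix, exactly as shown in the diff above; a standalone re-creation of that construction (the function name here is only illustrative):

import numpy as np
from sklearn.utils import check_random_state

def symmetric_matrix_sketch(n_actions, random_state):
    # mirror the lower triangle and keep the diagonal only once
    random_ = check_random_state(random_state)
    base_matrix = random_.normal(size=(n_actions, n_actions))
    return np.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())

matrix = symmetric_matrix_sketch(n_actions=3, random_state=1)
assert matrix.shape == (3, 3) and np.allclose(matrix, matrix.T)  # same checks as the unit test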
641,006 | 04.04.2021 21:42:15 | -32,400 | 46bcc7a20b4d71ae202d2b91d0572b4efa767c02 | unify slot_weight and action_effect -> action_interaction | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -7,9 +7,9 @@ from obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import linear_behavior_policy\nfrom obp.dataset.multiclass import MultiClassToBanditReduction\nfrom obp.dataset.synthetic_slate import SyntheticSlateBanditDataset\n-from obp.dataset.synthetic_slate import action_effect_additive_reward_function\n+from obp.dataset.synthetic_slate import action_interaction_additive_reward_function\nfrom obp.dataset.synthetic_slate import linear_behavior_policy_logit\n-from obp.dataset.synthetic_slate import slot_weighted_reward_function\n+from obp.dataset.synthetic_slate import action_interaction_exponential_reward_function\n__all__ = [\n\"BaseBanditDataset\",\n@@ -21,7 +21,7 @@ __all__ = [\n\"linear_behavior_policy\",\n\"MultiClassToBanditReduction\",\n\"SyntheticSlateBanditDataset\",\n- \"action_effect_additive_reward_function\",\n+ \"action_interaction_additive_reward_function\",\n\"linear_behavior_policy_logit\",\n- \"slot_weighted_reward_function\",\n+ \"action_interaction_exponential_reward_function\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -48,8 +48,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreward_structure: str, default='cascade_additive'\nType of reward structure, which must be either 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', 'standard_exponential'.\n- When 'cascade_additive' or 'standard_additive' is given, action_effect_matrix (:math:`w`) is generated.\n- When 'cascade_exponential', 'standard_exponential', or 'independent' is given, slot_weight_matrix is generated.\n+ When 'cascade_additive' or 'standard_additive' is given, additive action_interaction_matrix (:math:`W \\\\in \\\\mathbb{R}^{\\\\text{n_actions} \\\\times \\\\text{n_actions}}`) is generated.\n+ When 'cascade_exponential', 'standard_exponential', or 'independent' is given, exponential action_interaction_matrix (:math:`\\\\in \\\\mathbb{R}^{\\\\text{len_list} \\\\times \\\\text{len_list}}`) is generated.\nExpected reward is calculated as follows (:math:`f` is a base reward function of each item-position, and :math:`g` is a transform function):\n'cascade_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j < k} W(a(k), a(j)))`.\n'cascade_exponential': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j < k} g^{-1}(f(x, a(j))) / \\\\exp(|k-j|))`.\n@@ -220,29 +220,26 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nif self.click_model != \"pbm\":\nself.exam_weight = np.ones(self.len_list)\nif self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n- # generate action effect matrix\n- self.action_effect_matrix = generate_symmetric_matrix(\n+ # generate additive action interaction matrix of (n_actions, n_actions)\n+ self.action_interaction_matrix = generate_symmetric_matrix(\nn_actions=self.n_actions, random_state=self.random_state\n)\n- # slot weight matrix is not used when reward structure is additive\n- self.slot_weight_matrix = None\nif self.base_reward_function is not None:\n- self.reward_function = action_effect_additive_reward_function\n+ self.reward_function = action_interaction_additive_reward_function\nelse:\n- # action effect matrix is not used when reward structure is not additive\n- self.action_effect_matrix = None\nif self.base_reward_function is not None:\n- self.reward_function = slot_weighted_reward_function\n+ self.reward_function = action_interaction_exponential_reward_function\n+ # generate exponential action interaction matrix of (len_list, len_list)\nif self.reward_structure == \"standard_exponential\":\n- self.slot_weight_matrix = self.obtain_standard_exponential_slot_weight(\n- self.len_list\n+ self.action_interaction_matrix = (\n+ self.obtain_standard_exponential_slot_weight(self.len_list)\n)\nelif self.reward_structure == \"cascade_exponential\":\n- self.slot_weight_matrix = self.obtain_cascade_exponential_slot_weight(\n- self.len_list\n+ self.action_interaction_matrix = (\n+ self.obtain_cascade_exponential_slot_weight(self.len_list)\n)\nelse:\n- self.slot_weight_matrix = np.identity(self.len_list)\n+ self.action_interaction_matrix = np.identity(self.len_list)\nif self.behavior_policy_function is None:\nself.behavior_policy = np.ones(self.n_actions) / self.n_actions\nif self.reward_type == \"continuous\":\n@@ -255,27 +252,27 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n@staticmethod\ndef obtain_standard_exponential_slot_weight(len_list):\n\"\"\"Obtain slot weight matrix for standard exponential reward structure (symmetric matrix)\"\"\"\n- slot_weight_matrix = np.identity(len_list)\n+ action_interaction_matrix = 
np.identity(len_list)\nfor position_ in np.arange(len_list):\n- slot_weight_matrix[:, position_] = -1 / np.exp(\n+ action_interaction_matrix[:, position_] = -1 / np.exp(\nnp.abs(np.arange(len_list) - position_)\n)\n- slot_weight_matrix[position_, position_] = 1\n- return slot_weight_matrix\n+ action_interaction_matrix[position_, position_] = 1\n+ return action_interaction_matrix\n@staticmethod\ndef obtain_cascade_exponential_slot_weight(len_list):\n\"\"\"Obtain slot weight matrix for cascade exponential reward structure (upper triangular matrix)\"\"\"\n- slot_weight_matrix = np.identity(len_list)\n+ action_interaction_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\n- slot_weight_matrix[:, position_] = -1 / np.exp(\n+ action_interaction_matrix[:, position_] = -1 / np.exp(\nnp.abs(np.arange(len_list) - position_)\n)\nfor position_2 in np.arange(len_list):\nif position_ < position_2:\n- slot_weight_matrix[position_2, position_] = 0\n- slot_weight_matrix[position_, position_] = 1\n- return slot_weight_matrix\n+ action_interaction_matrix[position_2, position_] = 0\n+ action_interaction_matrix[position_, position_] = 1\n+ return action_interaction_matrix\ndef calc_item_position_pscore(\nself, action_list: List[int], behavior_policy_logit_i_: np.ndarray\n@@ -330,7 +327,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore: array-like, shape (n_actions * len_list)\nJoint action choice probabilities of the slate given context (:math:`x`).\n- i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{\\\\text{len list}})`.\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{\\\\text{len_list}})`.\npscore_item_position: array-like, shape (n_actions * len_list)\nMarginal action choice probabilities of each slot given context (:math:`x`).\n@@ -543,12 +540,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ncontext=context,\naction_context=self.action_context,\naction=action,\n- slot_weight_matrix=self.slot_weight_matrix,\n+ action_interaction_matrix=self.action_interaction_matrix,\nbase_reward_function=self.base_reward_function,\nis_cascade=\"cascade\" in self.reward_structure,\nreward_type=self.reward_type,\nlen_list=self.len_list,\n- action_effect_matrix=self.action_effect_matrix,\nrandom_state=self.random_state,\n)\n# check the shape of expected_reward_factual\n@@ -602,20 +598,19 @@ def generate_symmetric_matrix(n_actions: int, random_state: int) -> np.ndarray:\nreturn symmetric_matrix\n-def action_effect_additive_reward_function(\n+def action_interaction_additive_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\nbase_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n- action_effect_matrix: np.ndarray,\n+ action_interaction_matrix: np.ndarray,\nis_cascade: bool,\nlen_list: int,\nreward_type: str,\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"Slot-weighted reward function\n- slot_weight_matrix: array-like, shape (len_list, len_list)\n+ \"\"\"Reward function incorporating additive interactions among combinatorial action\nParameters\n-----------\n@@ -639,7 +634,7 @@ def action_effect_additive_reward_function(\nType of reward variable, which must be either 'binary' or 'continuous'.\nWhen 'binary' is given, expected reward is transformed by logit function.\n- action_effect_matrix (`W`): array-like, shape (n_actions, n_actions)\n+ action_interaction_matrix (`W`): array-like, shape (n_actions, n_actions)\n`W(i, j)` is the interaction 
term between action `i` and `j`.\nlen_list: int (> 1)\n@@ -674,12 +669,12 @@ def action_effect_additive_reward_function(\n\"the size of axis 0 of context muptiplied by len_list must be the same as that of action\"\n)\n- if action_effect_matrix.shape != (\n+ if action_interaction_matrix.shape != (\naction_context.shape[0],\naction_context.shape[0],\n):\nraise ValueError(\n- f\"the shape of action effect matrix must be (action_context.shape[0], action_context.shape[0]), but {action_effect_matrix.shape}\"\n+ f\"the shape of action effect matrix must be (action_context.shape[0], action_context.shape[0]), but {action_interaction_matrix.shape}\"\n)\nif reward_type not in [\n@@ -709,7 +704,7 @@ def action_effect_additive_reward_function(\nbreak\nelif position_ == position2_:\ncontinue\n- tmp_fixed_reward += action_effect_matrix[\n+ tmp_fixed_reward += action_interaction_matrix[\naction_2d[:, position_], action_2d[:, position2_]\n]\nexpected_reward_factual[:, position_] = tmp_fixed_reward\n@@ -722,18 +717,17 @@ def action_effect_additive_reward_function(\nreturn expected_reward_factual\n-def slot_weighted_reward_function(\n+def action_interaction_exponential_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\nbase_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n- slot_weight_matrix: np.ndarray,\n+ action_interaction_matrix: np.ndarray,\nreward_type: str,\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"Slot-weighted reward function\n- slot_weight_matrix: array-like, shape (len_list, len_list)\n+ \"\"\"Reward function incorporating exponential interactions among combinatorial action\nParameters\n-----------\n@@ -757,7 +751,7 @@ def slot_weighted_reward_function(\nType of reward variable, which must be either 'binary' or 'continuous'.\nWhen 'binary' is given, expected reward is transformed by logit function.\n- slot_weight_matrix (`W`): array-like, shape (len_list, len_list)\n+ action_interaction_matrix (`W`): array-like, shape (len_list, len_list)\n`W(i, j)` is the weight of how the expected reward of slot `i` affects that of slot `j`.\nrandom_state: int, default=None\n@@ -785,12 +779,12 @@ def slot_weighted_reward_function(\nraise ValueError(\nf\"reward_type must be either 'binary' or 'continuous', but {reward_type} is given.'\"\n)\n- if slot_weight_matrix.shape[0] * context.shape[0] != action.shape[0]:\n+ if action_interaction_matrix.shape[0] * context.shape[0] != action.shape[0]:\nraise ValueError(\n- \"the size of axis 0 of slot_weight_matrix muptiplied by that of context must be the same as that of action\"\n+ \"the size of axis 0 of action_interaction_matrix muptiplied by that of context must be the same as that of action\"\n)\n# action_2d: array-like, shape (n_rounds, len_list)\n- action_2d = action.reshape((context.shape[0], slot_weight_matrix.shape[0]))\n+ action_2d = action.reshape((context.shape[0], action_interaction_matrix.shape[0]))\n# action_3d: array-like, shape (n_rounds, n_actions, len_list)\naction_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n# expected_reward: array-like, shape (n_rounds, n_actions)\n@@ -801,10 +795,10 @@ def slot_weighted_reward_function(\nexpected_reward = np.log(expected_reward / (1 - expected_reward))\n# expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\nexpected_reward_3d = np.tile(\n- expected_reward, (slot_weight_matrix.shape[0], 1, 1)\n+ expected_reward, (action_interaction_matrix.shape[0], 1, 1)\n).transpose(1, 2, 0)\n# 
action_weight: array-like, shape (n_rounds, n_actions, len_list)\n- action_weight = action_3d @ slot_weight_matrix\n+ action_weight = action_3d @ action_interaction_matrix\n# weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\nweighted_expected_reward = action_weight * expected_reward_3d\n# expected_reward_factual: list, shape (n_rounds, len_list)\n@@ -816,7 +810,7 @@ def slot_weighted_reward_function(\nresult = np.array(expected_reward_factual)\nassert result.shape == (\ncontext.shape[0],\n- slot_weight_matrix.shape[0],\n+ action_interaction_matrix.shape[0],\n), f\"response shape must be (n_rounds, len_list), but {result.shape}\"\nreturn result\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate_functions.py",
"new_path": "tests/dataset/test_synthetic_slate_functions.py",
"diff": "@@ -7,8 +7,8 @@ from obp.dataset.synthetic import (\n)\nfrom obp.dataset.synthetic_slate import (\nlinear_behavior_policy_logit,\n- slot_weighted_reward_function,\n- action_effect_additive_reward_function,\n+ action_interaction_exponential_reward_function,\n+ action_interaction_additive_reward_function,\ngenerate_symmetric_matrix,\n)\n@@ -92,8 +92,8 @@ def test_linear_behavior_policy_logit_using_valid_input(\nassert logit_value.shape == (context.shape[0], action_context.shape[0])\n-# context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, err, description\n-invalid_input_of_slot_weighted_reward_function = [\n+# context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, random_state, err, description\n+invalid_input_of_action_interaction_exponential_reward_function = [\n(\nnp.array([5, 2]),\nnp.ones([4, 2]),\n@@ -153,34 +153,34 @@ invalid_input_of_slot_weighted_reward_function = [\[email protected](\n- \"context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, err, description\",\n- invalid_input_of_slot_weighted_reward_function,\n+ \"context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, random_state, err, description\",\n+ invalid_input_of_action_interaction_exponential_reward_function,\n)\n-def test_slot_weighted_reward_function_using_invalid_input(\n+def test_action_interaction_exponential_reward_function_using_invalid_input(\ncontext,\naction_context,\naction,\nbase_reward_function,\n- slot_weight_matrix,\n+ action_interaction_matrix,\nreward_type,\nrandom_state,\nerr,\ndescription,\n):\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = slot_weighted_reward_function(\n+ _ = action_interaction_exponential_reward_function(\ncontext=context,\naction_context=action_context,\naction=action,\n- slot_weight_matrix=slot_weight_matrix,\n+ action_interaction_matrix=action_interaction_matrix,\nbase_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\n)\n-# context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, description\n-valid_input_of_slot_weighted_reward_function = [\n+# context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, random_state, description\n+valid_input_of_action_interaction_exponential_reward_function = [\n(\nnp.ones([5, 2]),\nnp.ones([4, 2]),\n@@ -205,31 +205,31 @@ valid_input_of_slot_weighted_reward_function = [\[email protected](\n- \"context, action_context, action, base_reward_function, slot_weight_matrix, reward_type, random_state, description\",\n- valid_input_of_slot_weighted_reward_function,\n+ \"context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, random_state, description\",\n+ valid_input_of_action_interaction_exponential_reward_function,\n)\n-def test_slot_weighted_reward_function_using_valid_input(\n+def test_action_interaction_exponential_reward_function_using_valid_input(\ncontext,\naction_context,\naction,\nbase_reward_function,\n- slot_weight_matrix,\n+ action_interaction_matrix,\nreward_type,\nrandom_state,\ndescription,\n):\n- expected_reward_factual = slot_weighted_reward_function(\n+ expected_reward_factual = action_interaction_exponential_reward_function(\ncontext=context,\naction_context=action_context,\naction=action,\n- slot_weight_matrix=slot_weight_matrix,\n+ 
action_interaction_matrix=action_interaction_matrix,\nbase_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\n)\nassert expected_reward_factual.shape == (\ncontext.shape[0],\n- slot_weight_matrix.shape[0],\n+ action_interaction_matrix.shape[0],\n)\nif reward_type == \"binary\":\nassert np.all(0 <= expected_reward_factual) and np.all(\n@@ -237,8 +237,8 @@ def test_slot_weighted_reward_function_using_valid_input(\n)\n-# context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\n-invalid_input_of_action_effect_reward_function = [\n+# context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, is_cascade, len_list, random_state, err, description\n+invalid_input_of_action_interaction_reward_function = [\n(\nnp.array([5, 2]),\nnp.ones([4, 2]),\n@@ -321,15 +321,15 @@ invalid_input_of_action_effect_reward_function = [\[email protected](\n- \"context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\",\n- invalid_input_of_action_effect_reward_function,\n+ \"context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, is_cascade, len_list, random_state, err, description\",\n+ invalid_input_of_action_interaction_reward_function,\n)\n-def test_action_effect_reward_function_using_invalid_input(\n+def test_action_interaction_reward_function_using_invalid_input(\ncontext,\naction_context,\naction,\nbase_reward_function,\n- action_effect_matrix,\n+ action_interaction_matrix,\nreward_type,\nis_cascade,\nlen_list,\n@@ -338,11 +338,11 @@ def test_action_effect_reward_function_using_invalid_input(\ndescription,\n):\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = action_effect_additive_reward_function(\n+ _ = action_interaction_additive_reward_function(\ncontext=context,\naction_context=action_context,\naction=action,\n- action_effect_matrix=action_effect_matrix,\n+ action_interaction_matrix=action_interaction_matrix,\nbase_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\n@@ -351,8 +351,8 @@ def test_action_effect_reward_function_using_invalid_input(\n)\n-# context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, err, description\n-valid_input_of_action_effect_reward_function = [\n+# context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, is_cascade, len_list, random_state, err, description\n+valid_input_of_action_interaction_reward_function = [\n(\nnp.ones([5, 2]),\nnp.ones([4, 2]),\n@@ -405,26 +405,26 @@ valid_input_of_action_effect_reward_function = [\[email protected](\n- \"context, action_context, action, base_reward_function, action_effect_matrix, reward_type, is_cascade, len_list, random_state, description\",\n- valid_input_of_action_effect_reward_function,\n+ \"context, action_context, action, base_reward_function, action_interaction_matrix, reward_type, is_cascade, len_list, random_state, description\",\n+ valid_input_of_action_interaction_reward_function,\n)\n-def test_action_effect_reward_function_using_valid_input(\n+def test_action_interaction_reward_function_using_valid_input(\ncontext,\naction_context,\naction,\nbase_reward_function,\n- action_effect_matrix,\n+ 
action_interaction_matrix,\nreward_type,\nis_cascade,\nlen_list,\nrandom_state,\ndescription,\n):\n- expected_reward_factual = action_effect_additive_reward_function(\n+ expected_reward_factual = action_interaction_additive_reward_function(\ncontext=context,\naction_context=action_context,\naction=action,\n- action_effect_matrix=action_effect_matrix,\n+ action_interaction_matrix=action_interaction_matrix,\nbase_reward_function=base_reward_function,\nreward_type=reward_type,\nrandom_state=random_state,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | unify slot_weight and action_effect -> action_interaction |
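The "standard_exponential" structure unified in this commit weights slot interactions by a matrix whose (k, j) entry is -1 / exp(|k - j|) off the diagonal and 1 on it; a small sketch reproducing that construction from the diff (illustrative function name, not part of the package API):

import numpy as np

def standard_exponential_interaction_matrix(len_list):
    W = np.identity(len_list)
    for position_ in np.arange(len_list):
        # interactions decay exponentially with the distance between slots
        W[:, position_] = -1 / np.exp(np.abs(np.arange(len_list) - position_))
        W[position_, position_] = 1
    return W

print(standard_exponential_interaction_matrix(3))
# [[ 1.   , -0.368, -0.135],
#  [-0.368,  1.   , -0.368],
#  [-0.135, -0.368,  1.   ]]  (rounded)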
641,006 | 10.04.2021 14:41:34 | -32,400 | 4d96d7589935f865a79276e6c39f895c5fb25b77 | n_actions -> n_unique_action | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -30,7 +30,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nParameters\n-----------\n- n_actions: int (>= len_list)\n+ n_unique_action: int (>= len_list)\nNumber of actions.\nlen_list: int (> 1)\n@@ -48,7 +48,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreward_structure: str, default='cascade_additive'\nType of reward structure, which must be either 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', 'standard_exponential'.\n- When 'cascade_additive' or 'standard_additive' is given, additive action_interaction_matrix (:math:`W \\\\in \\\\mathbb{R}^{\\\\text{n_actions} \\\\times \\\\text{n_actions}}`) is generated.\n+ When 'cascade_additive' or 'standard_additive' is given, additive action_interaction_matrix (:math:`W \\\\in \\\\mathbb{R}^{\\\\text{n_unique_action} \\\\times \\\\text{n_unique_action}}`) is generated.\nWhen 'cascade_exponential', 'standard_exponential', or 'independent' is given, exponential action_interaction_matrix (:math:`\\\\in \\\\mathbb{R}^{\\\\text{len_list} \\\\times \\\\text{len_list}}`) is generated.\nExpected reward is calculated as follows (:math:`f` is a base reward function of each item-position, and :math:`g` is a transform function):\n'cascade_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j < k} W(a(k), a(j)))`.\n@@ -101,7 +101,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# generate synthetic contextual bandit feedback with 10 actions.\n>>> dataset = SyntheticSlateBanditDataset(\n- n_actions=10,\n+ n_unique_action=10,\ndim_context=5,\nbase_reward_function=logistic_reward_function,\nbehavior_policy_function=linear_behavior_policy,\n@@ -117,7 +117,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n>>> bandit_feedback\n{\n'n_rounds': 5,\n- 'n_actions': 10,\n+ 'n_unique_action': 10,\n'slate_id': array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]),\n'context': array([[-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057],\n[ 1.39340583, 0.09290788, 0.28174615, 0.76902257, 1.24643474],\n@@ -153,7 +153,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n\"\"\"\n- n_actions: int\n+ n_unique_action: int\nlen_list: int\ndim_context: int = 1\nreward_type: str = \"binary\"\n@@ -173,17 +173,17 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n+ if not isinstance(self.n_unique_action, int) or self.n_unique_action <= 1:\nraise ValueError(\n- f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n+ f\"n_unique_action must be an integer larger than 1, but {self.n_unique_action} is given\"\n)\nif (\nnot isinstance(self.len_list, int)\nor self.len_list <= 1\n- or self.len_list > self.n_actions\n+ or self.len_list > self.n_unique_action\n):\nraise ValueError(\n- f\"len_list must be an integer such that 1 < len_list <= n_actions, but {self.len_list} is given\"\n+ f\"len_list must be an integer such that 1 < len_list <= n_unique_action, but {self.len_list} is given\"\n)\nif not isinstance(self.dim_context, int) or self.dim_context <= 0:\nraise ValueError(\n@@ -220,9 +220,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nif self.click_model != \"pbm\":\nself.exam_weight = np.ones(self.len_list)\nif self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n- # generate additive action interaction matrix of (n_actions, n_actions)\n+ # generate additive action interaction matrix 
of (n_unique_action, n_unique_action)\nself.action_interaction_matrix = generate_symmetric_matrix(\n- n_actions=self.n_actions, random_state=self.random_state\n+ n_unique_action=self.n_unique_action, random_state=self.random_state\n)\nif self.base_reward_function is not None:\nself.reward_function = action_interaction_additive_reward_function\n@@ -241,13 +241,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nelse:\nself.action_interaction_matrix = np.identity(self.len_list)\nif self.behavior_policy_function is None:\n- self.behavior_policy = np.ones(self.n_actions) / self.n_actions\n+ self.behavior_policy = np.ones(self.n_unique_action) / self.n_unique_action\nif self.reward_type == \"continuous\":\nself.reward_min = 0\nself.reward_max = 1e10\nself.reward_std = 1.0\n# one-hot encoding representations characterizing each action\n- self.action_context = np.eye(self.n_actions, dtype=int)\n+ self.action_context = np.eye(self.n_unique_action, dtype=int)\n@staticmethod\ndef obtain_standard_exponential_slot_weight(len_list):\n@@ -278,7 +278,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself, action_list: List[int], behavior_policy_logit_i_: np.ndarray\n) -> float:\n\"\"\"Calculate pscore_item_position\"\"\"\n- unique_action_set = np.arange(self.n_actions)\n+ unique_action_set = np.arange(self.n_unique_action)\npscore_ = 1.0\nfor action in action_list:\nscore_ = softmax(behavior_policy_logit_i_[:, unique_action_set])[0]\n@@ -308,7 +308,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn_pscore_item_position: bool, default=True\nA boolean parameter whether `pscore_item_position` is returned or not.\n- When `n_actions` and `len_list` are large, this parameter should be set to False because of the computational time\n+ When `n_unique_action` and `len_list` are large, this parameter should be set to False because of the computational time\nreturn_exact_uniform_pscore_item_position: bool, default=False\nA boolean parameter whether `pscore_item_position` of uniform random policy is returned or not.\n@@ -317,19 +317,19 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nReturns\n----------\n- action: array-like, shape (n_actions * len_list)\n+ action: array-like, shape (n_unique_action * len_list)\nSampled action.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n- pscore_cascade: array-like, shape (n_actions * len_list)\n+ pscore_cascade: array-like, shape (n_unique_action * len_list)\nJoint action choice probabilities above the slot (:math:`k`) in each slate given context (:math:`x`).\ni.e., :math:`\\\\pi_k: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{k})`.\n- pscore: array-like, shape (n_actions * len_list)\n+ pscore: array-like, shape (n_unique_action * len_list)\nJoint action choice probabilities of the slate given context (:math:`x`).\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{\\\\text{len_list}})`.\n- pscore_item_position: array-like, shape (n_actions * len_list)\n+ pscore_item_position: array-like, shape (n_unique_action * len_list)\nMarginal action choice probabilities of each slot given context (:math:`x`).\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n@@ -346,7 +346,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndesc=\"[sample_action_and_obtain_pscore]\",\ntotal=n_rounds,\n):\n- unique_action_set = np.arange(self.n_actions)\n+ unique_action_set = np.arange(self.n_unique_action)\npscore_i = 1.0\nfor position_ in 
np.arange(self.len_list):\nscore_ = softmax(behavior_policy_logit_[i : i + 1, unique_action_set])[\n@@ -369,12 +369,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nif return_pscore_item_position:\nif return_exact_uniform_pscore_item_position:\npscore_item_position[i * self.len_list + position_] = (\n- self.len_list / self.n_actions\n+ self.len_list / self.n_unique_action\n)\nelse:\npscore_item_position_i_l = 0.0\nfor action_list in permutations(\n- range(self.n_actions), self.len_list\n+ range(self.n_unique_action), self.len_list\n):\nif sampled_action_index not in action_list:\ncontinue\n@@ -396,7 +396,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndef sample_contextfree_expected_reward(self) -> np.ndarray:\n\"\"\"Sample expected reward for each action and slot from the uniform distribution\"\"\"\n- return self.random_.uniform(size=(self.n_actions, self.len_list))\n+ return self.random_.uniform(size=(self.n_unique_action, self.len_list))\ndef sample_reward_given_expected_reward(\nself, expected_reward_factual: np.ndarray\n@@ -410,7 +410,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nReturns\n----------\n- reward: array-like, shape (n_actions, len_list)\n+ reward: array-like, shape (n_unique_action, len_list)\n\"\"\"\nexpected_reward_factual = expected_reward_factual * self.exam_weight\n@@ -467,7 +467,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn_pscore_item_position: bool, default=True\nA boolean parameter whether `pscore_item_position` is returned or not.\n- When `n_actions` and `len_list` are large, this parameter should be set to False because of the computational time\n+ When `n_unique_action` and `len_list` are large, this parameter should be set to False because of the computational time\nreturn_exact_uniform_pscore_item_position: bool, default=False\nA boolean parameter whether `pscore_item_position` of uniform random policy is returned or not.\n@@ -505,7 +505,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# check the shape of behavior_policy_logit_\nif not (\nisinstance(behavior_policy_logit_, np.ndarray)\n- and behavior_policy_logit_.shape == (n_rounds, self.n_actions)\n+ and behavior_policy_logit_.shape == (n_rounds, self.n_unique_action)\n):\nraise ValueError(\"behavior_policy_logit_ is Invalid\")\n# sample action and pscores\n@@ -560,7 +560,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn dict(\nn_rounds=n_rounds,\n- n_actions=self.n_actions,\n+ n_unique_action=self.n_unique_action,\nslate_id=np.repeat(np.arange(n_rounds), self.len_list),\ncontext=context,\naction_context=self.action_context,\n@@ -574,13 +574,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n-def generate_symmetric_matrix(n_actions: int, random_state: int) -> np.ndarray:\n+def generate_symmetric_matrix(n_unique_action: int, random_state: int) -> np.ndarray:\n\"\"\"Generate symmetric matrix\nParameters\n-----------\n- n_actions: int (>= len_list)\n+ n_unique_action: int (>= len_list)\nNumber of actions.\nrandom_state: int\n@@ -588,10 +588,10 @@ def generate_symmetric_matrix(n_actions: int, random_state: int) -> np.ndarray:\nReturns\n---------\n- symmetric_matrix: array-like, shape (n_actions, n_actions)\n+ symmetric_matrix: array-like, shape (n_unique_action, n_unique_action)\n\"\"\"\nrandom_ = check_random_state(random_state)\n- base_matrix = random_.normal(size=(n_actions, n_actions))\n+ base_matrix = random_.normal(size=(n_unique_action, n_unique_action))\nsymmetric_matrix = (\nnp.tril(base_matrix) + 
np.tril(base_matrix).T - np.diag(base_matrix.diagonal())\n)\n@@ -617,10 +617,10 @@ def action_interaction_additive_reward_function(\ncontext: array-like, shape (n_rounds, dim_context)\nContext vectors characterizing each round (such as user information).\n- action_context: array-like, shape (n_actions, dim_action_context)\n+ action_context: array-like, shape (n_unique_action, dim_action_context)\nVector representation for each action.\n- action: array-like, shape (n_actions * len_list)\n+ action: array-like, shape (n_unique_action * len_list)\nSampled action.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n@@ -634,7 +634,7 @@ def action_interaction_additive_reward_function(\nType of reward variable, which must be either 'binary' or 'continuous'.\nWhen 'binary' is given, expected reward is transformed by logit function.\n- action_interaction_matrix (`W`): array-like, shape (n_actions, n_actions)\n+ action_interaction_matrix (`W`): array-like, shape (n_unique_action, n_unique_action)\n`W(i, j)` is the interaction term between action `i` and `j`.\nlen_list: int (> 1)\n@@ -687,7 +687,7 @@ def action_interaction_additive_reward_function(\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((context.shape[0], len_list))\n- # expected_reward: array-like, shape (n_rounds, n_actions)\n+ # expected_reward: array-like, shape (n_rounds, n_unique_action)\nexpected_reward = base_reward_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\n@@ -734,10 +734,10 @@ def action_interaction_exponential_reward_function(\ncontext: array-like, shape (n_rounds, dim_context)\nContext vectors characterizing each round (such as user information).\n- action_context: array-like, shape (n_actions, dim_action_context)\n+ action_context: array-like, shape (n_unique_action, dim_action_context)\nVector representation for each action.\n- action: array-like, shape (n_actions * len_list)\n+ action: array-like, shape (n_unique_action * len_list)\nSampled action.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n@@ -785,21 +785,21 @@ def action_interaction_exponential_reward_function(\n)\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((context.shape[0], action_interaction_matrix.shape[0]))\n- # action_3d: array-like, shape (n_rounds, n_actions, len_list)\n+ # action_3d: array-like, shape (n_rounds, n_unique_action, len_list)\naction_3d = np.identity(action_context.shape[0])[action_2d].transpose(0, 2, 1)\n- # expected_reward: array-like, shape (n_rounds, n_actions)\n+ # expected_reward: array-like, shape (n_rounds, n_unique_action)\nexpected_reward = base_reward_function(\ncontext=context, action_context=action_context, random_state=random_state\n)\nif reward_type == \"binary\":\nexpected_reward = np.log(expected_reward / (1 - expected_reward))\n- # expected_reward_3d: array-like, shape (n_rounds, n_actions, len_list)\n+ # expected_reward_3d: array-like, shape (n_rounds, n_unique_action, len_list)\nexpected_reward_3d = np.tile(\nexpected_reward, (action_interaction_matrix.shape[0], 1, 1)\n).transpose(1, 2, 0)\n- # action_weight: array-like, shape (n_rounds, n_actions, len_list)\n+ # action_weight: array-like, shape (n_rounds, n_unique_action, len_list)\naction_weight = action_3d @ action_interaction_matrix\n- # weighted_expected_reward: array-like, shape (n_rounds, n_actions, len_list)\n+ # weighted_expected_reward: array-like, shape (n_rounds, 
n_unique_action, len_list)\nweighted_expected_reward = action_weight * expected_reward_3d\n# expected_reward_factual: list, shape (n_rounds, len_list)\nexpected_reward_factual = weighted_expected_reward.sum(axis=1)\n@@ -828,7 +828,7 @@ def linear_behavior_policy_logit(\ncontext: array-like, shape (n_rounds, dim_context)\nContext vectors characterizing each round (such as user information).\n- action_context: array-like, shape (n_actions, dim_action_context)\n+ action_context: array-like, shape (n_unique_action, dim_action_context)\nVector representation for each action.\nrandom_state: int, default=None\n@@ -840,7 +840,7 @@ def linear_behavior_policy_logit(\nReturns\n---------\n- logit value: array-like, shape (n_rounds, n_actions)\n+ logit value: array-like, shape (n_rounds, n_unique_action)\nLogit given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -13,10 +13,10 @@ from obp.dataset import (\nfrom obp.types import BanditFeedback\n-# n_actions, len_list, dim_context, reward_type, random_state, description\n+# n_unique_action, len_list, dim_context, reward_type, random_state, description\ninvalid_input_of_init = [\n- (\"4\", 3, 2, \"binary\", 1, \"n_actions must be an integer larger than 1\"),\n- (1, 3, 2, \"binary\", 1, \"n_actions must be an integer larger than 1\"),\n+ (\"4\", 3, 2, \"binary\", 1, \"n_unique_action must be an integer larger than 1\"),\n+ (1, 3, 2, \"binary\", 1, \"n_unique_action must be an integer larger than 1\"),\n(5, \"4\", 2, \"binary\", 1, \"len_list must be an integer such that\"),\n(5, -1, 2, \"binary\", 1, \"len_list must be an integer such that\"),\n(5, 10, 2, \"binary\", 1, \"len_list must be an integer such that\"),\n@@ -29,15 +29,15 @@ invalid_input_of_init = [\[email protected](\n- \"n_actions, len_list, dim_context, reward_type, random_state, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, random_state, description\",\ninvalid_input_of_init,\n)\ndef test_synthetic_slate_init_using_invalid_inputs(\n- n_actions, len_list, dim_context, reward_type, random_state, description\n+ n_unique_action, len_list, dim_context, reward_type, random_state, description\n):\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n_ = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -124,14 +124,14 @@ def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy():\n# set parameters\n- n_actions = 10\n+ n_unique_action = 10\nlen_list = 3\ndim_context = 2\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\ndataset = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -150,7 +150,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\nfor column in [\"slate_id\", \"position\", \"action\"] + pscore_columns:\nbandit_feedback_df[column] = bandit_feedback[column]\n# check pscore marginal\n- pscore_item_position = float(len_list / n_actions)\n+ pscore_item_position = float(len_list / n_unique_action)\nassert np.allclose(\nbandit_feedback_df[\"pscore_item_position\"].unique(), [pscore_item_position]\n), f\"pscore_item_position must be [{pscore_item_position}], but {bandit_feedback_df['pscore_item_position'].unique()}\"\n@@ -158,7 +158,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\npscore_cascade = []\npscore_above = 1.0\nfor position_ in np.arange(len_list):\n- pscore_above = pscore_above * 1.0 / (n_actions - position_)\n+ pscore_above = pscore_above * 1.0 / (n_unique_action - position_)\npscore_cascade.append(pscore_above)\nassert np.allclose(\nbandit_feedback_df[\"pscore_cascade\"], np.tile(pscore_cascade, n_rounds)\n@@ -170,14 +170,14 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy_largescale():\n# set parameters\n- n_actions = 100\n+ n_unique_action = 100\nlen_list = 10\ndim_context = 2\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 10000\ndataset = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ 
n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -190,7 +190,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n# check pscore marginal\n- pscore_item_position = float(len_list / n_actions)\n+ pscore_item_position = float(len_list / n_unique_action)\nassert np.allclose(\nnp.unique(bandit_feedback[\"pscore_item_position\"]), [pscore_item_position]\n), f\"pscore_item_position must be [{pscore_item_position}], but {np.unique(bandit_feedback['pscore_item_position'])}\"\n@@ -198,14 +198,14 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy():\n# set parameters\n- n_actions = 10\n+ n_unique_action = 10\nlen_list = 3\ndim_context = 2\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\ndataset = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -232,14 +232,14 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy_without_pscore_item_position():\n# set parameters\n- n_actions = 80\n+ n_unique_action = 80\nlen_list = 3\ndim_context = 2\nreward_type = \"binary\"\nrandom_state = 12345\nn_rounds = 100\ndataset = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -258,7 +258,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\n# random seed should be fixed\ndataset2 = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -280,7 +280,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nassert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n-# n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\n+# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\nvalid_input_of_obtain_batch_bandit_feedback = [\n(\n10,\n@@ -601,11 +601,11 @@ valid_input_of_obtain_batch_bandit_feedback = [\[email protected](\n- \"n_actions, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\",\nvalid_input_of_obtain_batch_bandit_feedback,\n)\ndef test_synthetic_slate_using_valid_inputs(\n- n_actions,\n+ n_unique_action,\nlen_list,\ndim_context,\nreward_type,\n@@ -620,7 +620,7 @@ def test_synthetic_slate_using_valid_inputs(\ndescription,\n):\ndataset = SyntheticSlateBanditDataset(\n- n_actions=n_actions,\n+ 
n_unique_action=n_unique_action,\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate_functions.py",
"new_path": "tests/dataset/test_synthetic_slate_functions.py",
"diff": "@@ -14,7 +14,7 @@ from obp.dataset.synthetic_slate import (\ndef test_generate_symmetric_matrix():\n- matrix = generate_symmetric_matrix(n_actions=3, random_state=1)\n+ matrix = generate_symmetric_matrix(n_unique_action=3, random_state=1)\nassert matrix.shape == (3, 3)\nassert np.allclose(matrix, matrix.T)\n@@ -244,7 +244,7 @@ invalid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -257,7 +257,7 @@ invalid_input_of_action_interaction_reward_function = [\nnp.array([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -270,7 +270,7 @@ invalid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.ones([5, 2]),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -283,7 +283,7 @@ invalid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.random.choice(5),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -296,7 +296,7 @@ invalid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.ones(10),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -309,7 +309,7 @@ invalid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=3, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=3, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -358,7 +358,7 @@ valid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nTrue,\n3,\n@@ -370,7 +370,7 @@ valid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlinear_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"continuous\",\nTrue,\n3,\n@@ -382,7 +382,7 @@ valid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlogistic_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"binary\",\nFalse,\n3,\n@@ -394,7 +394,7 @@ valid_input_of_action_interaction_reward_function = [\nnp.ones([4, 2]),\nnp.tile(np.arange(3), 5),\nlinear_reward_function,\n- generate_symmetric_matrix(n_actions=4, random_state=1),\n+ generate_symmetric_matrix(n_unique_action=4, random_state=1),\n\"continuous\",\nFalse,\n3,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | n_actions -> n_unique_action |
641,006 | 10.04.2021 14:42:29 | -32,400 | 247bedd57ad07a0ee8afe067a9e3399889f56966 | result -> expected_reward_factual | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -807,12 +807,12 @@ def action_interaction_exponential_reward_function(\nexpected_reward_factual = sigmoid(expected_reward_factual)\n# q_l = \\sum_{a} a3d[i, a, l] q_a + \\sum_{a_1, a_2} delta(a_1, a_2)\n# return: array, shape (n_rounds, len_list)\n- result = np.array(expected_reward_factual)\n- assert result.shape == (\n+ expected_reward_factual = np.array(expected_reward_factual)\n+ assert expected_reward_factual.shape == (\ncontext.shape[0],\naction_interaction_matrix.shape[0],\n- ), f\"response shape must be (n_rounds, len_list), but {result.shape}\"\n- return result\n+ ), f\"response shape must be (n_rounds, len_list), but {expected_reward_factual.shape}\"\n+ return expected_reward_factual\ndef linear_behavior_policy_logit(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | result -> expected_reward_factual |
641,006 | 10.04.2021 15:06:50 | -32,400 | c12b0e05aa004f4b3d211ee54baf0c7b468bf783 | remove exam weight from initialization | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -66,10 +66,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nWhen 'cascade' is given, reward of each slot is sampled using the cascade model.\nWhen using some click model, 'continuous' reward type is unavailable.\n- exam_weight: np.ndarray, default=None\n- Slot-level examination probability.\n- When click_model is 'pbm', exam_weight must be defined.\n-\nbase_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\nFunction generating expected reward for each given action-context pair,\ni.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n@@ -207,18 +203,18 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n\"standard_exponential\",\n]:\nraise ValueError(\n- f\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.'\"\n- )\n- if self.click_model == \"pbm\" and not isinstance(self.exam_weight, np.ndarray):\n- raise ValueError(\n- f\"exam_weight must be ndarray when click model is 'pbm', but {self.exam_weight} is given\"\n+ f\"reward_structure must be either 'RIPS', 'SIPS', or 'IIPS', but {self.reward_structure} is given.\"\n)\n+ # set exam_weight (slot-level examination probability).\n+ # When click_model is 'pbm', exam_weight is :math:`1 / k`, where :math:`k` is the position.\n+ if self.click_model == \"pbm\":\n+ self.exam_weight = 1 / np.arange(1, self.len_list + 1)\n+ else:\n+ self.exam_weight = np.ones(self.len_list)\nif self.click_model is not None and self.reward_type == \"continuous\":\nraise ValueError(\n\"continuous reward type is unavailable when click model is given\"\n)\n- if self.click_model != \"pbm\":\n- self.exam_weight = np.ones(self.len_list)\nif self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n# generate additive action interaction matrix of (n_unique_action, n_unique_action)\nself.action_interaction_matrix = generate_symmetric_matrix(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -280,7 +280,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nassert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n-# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\n+# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description\nvalid_input_of_obtain_batch_bandit_feedback = [\n(\n10,\n@@ -291,7 +291,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -306,7 +305,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -321,7 +319,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -336,7 +333,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -351,7 +347,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -366,7 +361,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -383,7 +377,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\nNone,\nNone,\nNone,\n- None,\nFalse,\n\"Random policy and reward function (continuous reward)\",\n),\n@@ -396,7 +389,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -411,7 +403,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -426,7 +417,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -441,7 +431,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\nNone,\n- None,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -456,7 +445,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\n\"cascade\",\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -471,7 +459,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\n\"cascade\",\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -486,7 +473,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\n\"cascade\",\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -501,7 +487,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\n\"cascade\",\n- None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -516,7 +501,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\n\"cascade\",\n- 
None,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -531,7 +515,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\n\"pbm\",\n- 1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -546,7 +529,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\n\"pbm\",\n- 1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -561,7 +543,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\n\"pbm\",\n- 1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -576,7 +557,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\n\"pbm\",\n- 1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -591,7 +571,6 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\n\"pbm\",\n- 1 / np.exp(np.arange(3)),\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -601,7 +580,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\[email protected](\n- \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, exam_weight, behavior_policy_function, reward_function, return_pscore_item_position, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description\",\nvalid_input_of_obtain_batch_bandit_feedback,\n)\ndef test_synthetic_slate_using_valid_inputs(\n@@ -613,7 +592,6 @@ def test_synthetic_slate_using_valid_inputs(\nn_rounds,\nreward_structure,\nclick_model,\n- exam_weight,\nbehavior_policy_function,\nreward_function,\nreturn_pscore_item_position,\n@@ -626,7 +604,6 @@ def test_synthetic_slate_using_valid_inputs(\nreward_type=reward_type,\nreward_structure=reward_structure,\nclick_model=click_model,\n- exam_weight=exam_weight,\nrandom_state=random_state,\nbehavior_policy_function=behavior_policy_function,\nbase_reward_function=reward_function,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | remove exam weight from initialization |
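A minimal sketch of the examination weights this commit hard-codes into the constructor: under the position-based click model (`'pbm'`) the k-th slot is examined with probability 1/k, while with `'cascade'` or no click model every slot keeps weight 1. The array values follow directly from the diff above; the discounting at the end uses purely illustrative toy numbers.

```python
import numpy as np

len_list = 3
# position-based model: examination probability decays as 1/k with the slot index k
exam_weight_pbm = 1.0 / np.arange(1, len_list + 1)  # array([1.  , 0.5 , 0.333...])
# 'cascade' or no click model: every slot keeps weight 1
exam_weight_none = np.ones(len_list)                # array([1., 1., 1.])

# illustrative factual expected rewards for one slate of length 3;
# under 'pbm' the lower slots get discounted before rewards are sampled
expected_reward_factual = np.array([0.8, 0.6, 0.4])
print(expected_reward_factual * exam_weight_pbm)    # [0.8, 0.3, 0.1333...]
```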
641,006 | 10.04.2021 15:30:09 | -32,400 | 42be0a52a82769ddb59695f6f6b5aba3ed519138 | add input validation of slate dataset | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -205,6 +205,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_structure must be either 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', or 'standard_exponential', but {self.reward_structure} is given.\"\n)\n+ if self.click_model not in [\"cascade\", \"pbm\", None]:\n+ raise ValueError(\n+ f\"click_model must be either 'cascade', 'pbm', or None, but {self.click_model} is given.\"\n+ )\n# set exam_weight (slot-level examination probability).\n# When click_model is 'pbm', exam_weight is :math:`1 / k`, where :math:`k` is the position.\nif self.click_model == \"pbm\":\n@@ -484,7 +488,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nand self.behavior_policy_function is not None\n):\nraise ValueError(\n- \"when return_exact_uniform_pscore_item_position is True, behavior_policy_function must be specified\"\n+ \"when return_exact_uniform_pscore_item_position is True, behavior_policy_function must not be specified (must be random)\"\n)\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -13,27 +13,99 @@ from obp.dataset import (\nfrom obp.types import BanditFeedback\n-# n_unique_action, len_list, dim_context, reward_type, random_state, description\n+# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description\ninvalid_input_of_init = [\n- (\"4\", 3, 2, \"binary\", 1, \"n_unique_action must be an integer larger than 1\"),\n- (1, 3, 2, \"binary\", 1, \"n_unique_action must be an integer larger than 1\"),\n- (5, \"4\", 2, \"binary\", 1, \"len_list must be an integer such that\"),\n- (5, -1, 2, \"binary\", 1, \"len_list must be an integer such that\"),\n- (5, 10, 2, \"binary\", 1, \"len_list must be an integer such that\"),\n- (5, 3, 0, \"binary\", 1, \"dim_context must be a positive integer\"),\n- (5, 3, \"2\", \"binary\", 1, \"dim_context must be a positive integer\"),\n- (5, 3, 2, \"aaa\", 1, \"reward_type must be either\"),\n- (5, 3, 2, \"binary\", \"x\", \"random_state must be an integer\"),\n- (5, 3, 2, \"binary\", None, \"random_state must be an integer\"),\n+ (\n+ \"4\",\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"n_unique_action must be an integer larger than 1\",\n+ ),\n+ (\n+ 1,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"n_unique_action must be an integer larger than 1\",\n+ ),\n+ (\n+ 5,\n+ \"4\",\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"len_list must be an integer such that\",\n+ ),\n+ (\n+ 5,\n+ -1,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"len_list must be an integer such that\",\n+ ),\n+ (\n+ 5,\n+ 10,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"len_list must be an integer such that\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 0,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"dim_context must be a positive integer\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ \"2\",\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1,\n+ \"dim_context must be a positive integer\",\n+ ),\n+ (5, 3, 2, \"aaa\", \"independent\", \"pbm\", 1, \"reward_type must be either\"),\n+ (5, 3, 2, \"binary\", \"aaa\", \"pbm\", 1, \"reward_structure must be either\"),\n+ (5, 3, 2, \"binary\", \"independent\", \"aaa\", 1, \"click_model must be either\"),\n+ (5, 3, 2, \"binary\", \"independent\", \"pbm\", \"x\", \"random_state must be an integer\"),\n+ (5, 3, 2, \"binary\", \"independent\", \"pbm\", None, \"random_state must be an integer\"),\n]\[email protected](\n- \"n_unique_action, len_list, dim_context, reward_type, random_state, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description\",\ninvalid_input_of_init,\n)\ndef test_synthetic_slate_init_using_invalid_inputs(\n- n_unique_action, len_list, dim_context, reward_type, random_state, description\n+ n_unique_action,\n+ len_list,\n+ dim_context,\n+ reward_type,\n+ reward_structure,\n+ click_model,\n+ random_state,\n+ description,\n):\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n_ = SyntheticSlateBanditDataset(\n@@ -41,6 +113,8 @@ def test_synthetic_slate_init_using_invalid_inputs(\nlen_list=len_list,\ndim_context=dim_context,\nreward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\nrandom_state=random_state,\n)\n@@ -137,7 +211,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\nreward_type=reward_type,\nrandom_state=random_state,\n)\n- # get feedback\n+ # obtain feedback\nbandit_feedback = 
dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n@@ -183,7 +257,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\nreward_type=reward_type,\nrandom_state=random_state,\n)\n- # get feedback\n+ # obtain feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds, return_exact_uniform_pscore_item_position=True\n)\n@@ -212,7 +286,16 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n)\n- # get feedback\n+ with pytest.raises(ValueError):\n+ _ = dataset.obtain_batch_bandit_feedback(n_rounds=-1)\n+ with pytest.raises(ValueError):\n+ _ = dataset.obtain_batch_bandit_feedback(n_rounds=\"a\")\n+ with pytest.raises(ValueError):\n+ _ = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_exact_uniform_pscore_item_position=True\n+ )\n+\n+ # obtain feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n# check slate bandit feedback (common test)\ncheck_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n@@ -246,7 +329,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n)\n- # get feedback\n+ # obtain feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds, return_pscore_item_position=False\n)\n@@ -265,7 +348,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n)\n- # get feedback\n+ # obtain feedback\nbandit_feedback2 = dataset2.obtain_batch_bandit_feedback(\nn_rounds=n_rounds, return_pscore_item_position=False\n)\n@@ -608,7 +691,7 @@ def test_synthetic_slate_using_valid_inputs(\nbehavior_policy_function=behavior_policy_function,\nbase_reward_function=reward_function,\n)\n- # get feedback\n+ # obtain feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds, return_pscore_item_position=return_pscore_item_position\n)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add input validation of slate dataset |
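The added checks can be exercised directly; the snippet below mirrors the new test case in this commit (same constructor arguments and the same three failing calls), so every name comes from the diff above.

```python
from obp.dataset import SyntheticSlateBanditDataset, linear_behavior_policy_logit

dataset = SyntheticSlateBanditDataset(
    n_unique_action=10,
    len_list=3,
    dim_context=2,
    reward_type="binary",
    random_state=12345,
    behavior_policy_function=linear_behavior_policy_logit,
)
# each call below is expected to raise ValueError under the validation above
for bad_kwargs in (
    dict(n_rounds=-1),
    dict(n_rounds="a"),
    dict(n_rounds=100, return_exact_uniform_pscore_item_position=True),
):
    try:
        dataset.obtain_batch_bandit_feedback(**bad_kwargs)
    except ValueError as e:
        print("ValueError:", e)
```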
641,006 | 10.04.2021 17:14:05 | -32,400 | 9f6992593c209c470cf01d0c4e9cdcf9579ee5c9 | add slate ope estimators | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -7,6 +7,9 @@ from obp.ope.estimators import DoublyRobust\nfrom obp.ope.estimators import SelfNormalizedDoublyRobust\nfrom obp.ope.estimators import SwitchDoublyRobust\nfrom obp.ope.estimators import DoublyRobustWithShrinkage\n+from obp.ope.estimators_slate import SlateStandardIPS\n+from obp.ope.estimators_slate import SlateIndependentIPS\n+from obp.ope.estimators_slate import SlateRecursiveIPS\nfrom obp.ope.meta import OffPolicyEvaluation\nfrom obp.ope.regression_model import RegressionModel\n@@ -22,6 +25,9 @@ __all__ = [\n\"DoublyRobustWithShrinkage\",\n\"OffPolicyEvaluation\",\n\"RegressionModel\",\n+ \"SlateStandardIPS\",\n+ \"SlateIndependentIPS\",\n+ \"SlateRecursiveIPS\",\n]\n__all_estimators__ = [\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/ope/estimators_slate.py",
"diff": "+# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Slate Off-Policy Estimators.\"\"\"\n+from abc import ABCMeta, abstractmethod\n+from dataclasses import dataclass\n+from typing import Dict, Optional\n+\n+import numpy as np\n+from sklearn.utils import check_random_state\n+\n+from ..utils import check_confidence_interval_arguments\n+\n+\n+@dataclass\n+class BaseSlateOffPolicyEstimator(metaclass=ABCMeta):\n+ \"\"\"Base class for OPE estimators.\"\"\"\n+\n+ @abstractmethod\n+ def _estimate_round_rewards(self) -> np.ndarray:\n+ \"\"\"Estimate rewards for each round.\"\"\"\n+ raise NotImplementedError\n+\n+ @abstractmethod\n+ def estimate_policy_value(self) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\"\"\"\n+ raise NotImplementedError\n+\n+ @abstractmethod\n+ def estimate_interval(self) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\"\"\"\n+ raise NotImplementedError\n+\n+\n+# TODO :comment\n+@dataclass\n+class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\n+ \"\"\"Base Class of Slate Inverse Probability Weighting.\"\"\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ behavior_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ position_weight: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate rewards for each round.\n+\n+ Parameters\n+ ----------\n+ reward: array-like or Tensor, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action: array-like or Tensor, shape (n_rounds,)\n+ Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like or Tensor, shape (n_rounds,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ position: array-like or Tensor, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like or Tensor, shape (n_rounds,)\n+ Rewards estimated by IPW for each round.\n+\n+ \"\"\"\n+ reward_weight = np.vectorize(lambda x: position_weight[x])(position)\n+ iw = evaluation_policy_pscore / behavior_policy_pscore\n+ return reward * iw * reward_weight\n+\n+ def _estimate_slate_confidence_interval_by_bootstrap(\n+ self,\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ ) -> Dict[str, float]:\n+ check_confidence_interval_arguments(\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+ n_unique_slate = np.unique(slate_id).shape[0]\n+ boot_samples = list()\n+ random_ = check_random_state(random_state)\n+ for _ in np.arange(n_bootstrap_samples):\n+ sampled_slate = random_.choice(\n+ np.arange(n_unique_slate), size=n_unique_slate\n+ )\n+ boot_samples.append(reward[sampled_slate].sum() / n_unique_slate)\n+ lower_bound = np.percentile(boot_samples, 100 * (alpha / 2))\n+ upper_bound = np.percentile(boot_samples, 100 * (1.0 - alpha / 2))\n+ return {\n+ \"mean\": np.mean(boot_samples),\n+ f\"{100 * (1. 
- alpha)}% CI (lower)\": lower_bound,\n+ f\"{100 * (1. - alpha)}% CI (upper)\": upper_bound,\n+ }\n+\n+\n+@dataclass\n+class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n+ \"\"\"Estimate the policy value by Slate Standard Inverse Probability Scoring (SIPS).\"\"\"\n+\n+ len_list: int\n+ estimator_name: str = \"sips\"\n+ position_weight: Optional[np.ndarray] = None\n+\n+ def __post_init__(self) -> None:\n+ if self.position_weight is None:\n+ self.position_weight = np.ones(self.len_list)\n+ else:\n+ if not isinstance(self.position_weight, np.ndarray):\n+ raise ValueError(\"position weight type\")\n+ if not (\n+ self.position_weight.ndim == 1\n+ and self.position_weight.shape[0] == self.len_list\n+ ):\n+ raise ValueError(\"position weight shape\")\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ **kwargs,\n+ ) -> float:\n+ return self._estimate_round_rewards(\n+ reward=reward,\n+ position=position,\n+ behavior_policy_pscore=pscore,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ position_weight=self.position_weight,\n+ ).mean()\n+\n+ def estimate_interval(\n+ self,\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ reward = self._estimate_round_rewards(\n+ reward=reward,\n+ position=position,\n+ behavior_policy_pscore=pscore,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ position_weight=self.position_weight,\n+ )\n+ return self._estimate_slate_confidence_interval_by_bootstrap(\n+ slate_id=slate_id, reward=reward\n+ )\n+\n+\n+@dataclass\n+class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\n+ \"\"\"Estimate the policy value by Slate Independent Inverse Probability Scoring (IIPS).\"\"\"\n+\n+ len_list: int\n+ estimator_name: str = \"iips\"\n+ position_weight: Optional[np.ndarray] = None\n+\n+ def __post_init__(self) -> None:\n+ if self.position_weight is None:\n+ self.position_weight = np.ones(self.len_list)\n+ else:\n+ if not isinstance(self.position_weight, np.ndarray):\n+ raise ValueError(\"position weight type\")\n+ if not (\n+ self.position_weight.ndim == 1\n+ and self.position_weight.shape[0] == self.len_list\n+ ):\n+ raise ValueError(\"position weight shape\")\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore_item_position: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ **kwargs,\n+ ) -> float:\n+ return self._estimate_round_rewards(\n+ reward=reward,\n+ position=position,\n+ behavior_policy_pscore=pscore_item_position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ position_weight=self.position_weight,\n+ ).mean()\n+\n+ def estimate_interval(\n+ self,\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore_item_position: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ reward = self._estimate_round_rewards(\n+ reward=reward,\n+ position=position,\n+ behavior_policy_pscore=pscore_item_position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ position_weight=self.position_weight,\n+ )\n+ return self._estimate_slate_confidence_interval_by_bootstrap(\n+ slate_id=slate_id, 
reward=reward\n+ )\n+\n+\n+@dataclass\n+class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\n+ \"\"\"Estimate the policy value by Slate Recursive Inverse Probability Scoring (RIPS).\"\"\"\n+\n+ len_list: int\n+ estimator_name: str = \"rips\"\n+ position_weight: Optional[np.ndarray] = None\n+\n+ def __post_init__(self) -> None:\n+ if self.position_weight is None:\n+ self.position_weight = np.ones(self.len_list)\n+ else:\n+ if not isinstance(self.position_weight, np.ndarray):\n+ raise ValueError(\"position weight type\")\n+ if not (\n+ self.position_weight.ndim == 1\n+ and self.position_weight.shape[0] == self.len_list\n+ ):\n+ raise ValueError(\"position weight shape\")\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore_cascade: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ **kwargs,\n+ ) -> float:\n+ return self._estimate_round_rewards(\n+ reward=reward,\n+ position=position,\n+ behavior_policy_pscore=pscore_cascade,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ position_weight=self.position_weight,\n+ ).mean()\n+\n+ def estimate_interval(\n+ self,\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore_cascade: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ reward = self._estimate_round_rewards(\n+ reward=reward,\n+ position=position,\n+ behavior_policy_pscore=pscore_cascade,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ position_weight=self.position_weight,\n+ )\n+ return self._estimate_slate_confidence_interval_by_bootstrap(\n+ slate_id=slate_id, reward=reward\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add slate ope estimators |
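A hedged usage sketch of `SlateStandardIPS` as it exists at this commit (the constructor still takes `len_list`, and `position_weight` defaults to all ones; later commits in this log adjust the signature). The arrays are toy data: two slates of length three flattened into the `(n_rounds * len_list,)` record layout the slate estimators expect.

```python
import numpy as np
from obp.ope import SlateStandardIPS

# two slates of length 3, flattened into (n_rounds * len_list,) record arrays
reward = np.array([1, 0, 0, 0, 1, 1])
position = np.array([0, 1, 2, 0, 1, 2])
pscore = np.full(6, 0.5)                     # behavior policy slate-level propensity
evaluation_policy_pscore = np.full(6, 0.25)  # evaluation policy slate-level propensity

sips = SlateStandardIPS(len_list=3)
v_hat = sips.estimate_policy_value(
    reward=reward,
    position=position,
    pscore=pscore,
    evaluation_policy_pscore=evaluation_policy_pscore,
)
print(v_hat)  # 0.25 == np.mean(reward * evaluation_policy_pscore / pscore)
```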
641,006 | 11.04.2021 16:41:18 | -32,400 | 3a7aa95b9d6dbc4d462f83d3d8eb9d286fc9c300 | fix confidence interval of slate ope | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -75,6 +75,15 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\niw = evaluation_policy_pscore / behavior_policy_pscore\nreturn reward * iw * reward_weight\n+ @staticmethod\n+ def _extract_reward_by_bootstrap(\n+ slate_id: np.ndarray, reward: np.ndarray, sampled_slate: np.ndarray\n+ ) -> np.ndarray:\n+ sampled_reward = list()\n+ for slate in sampled_slate:\n+ sampled_reward.extend(reward[slate_id == slate])\n+ return np.array(sampled_reward)\n+\ndef _estimate_slate_confidence_interval_by_bootstrap(\nself,\nslate_id: np.ndarray,\n@@ -89,14 +98,15 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nrandom_state=random_state,\n)\n- n_unique_slate = np.unique(slate_id).shape[0]\n+ unique_slate = np.unique(slate_id)\nboot_samples = list()\nrandom_ = check_random_state(random_state)\nfor _ in np.arange(n_bootstrap_samples):\n- sampled_slate = random_.choice(\n- np.arange(n_unique_slate), size=n_unique_slate\n+ sampled_slate = random_.choice(unique_slate, size=unique_slate.shape[0])\n+ sampled_reward = self._extract_reward_by_bootstrap(\n+ slate_id=slate_id, reward=reward, sampled_slate=sampled_slate\n)\n- boot_samples.append(reward[sampled_slate].sum() / n_unique_slate)\n+ boot_samples.append(sampled_reward.sum() / unique_slate.shape[0])\nlower_bound = np.percentile(boot_samples, 100 * (alpha / 2))\nupper_bound = np.percentile(boot_samples, 100 * (1.0 - alpha / 2))\nreturn {\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix confidence interval of slate ope |
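The point of this fix is that resampling has to happen at the slate level rather than the row level, so that all slot records of a sampled slate stay together. A simplified, self-contained version of the corrected logic (the function name and toy data below are illustrative, not part of the library):

```python
import numpy as np


def bootstrap_slate_mean(slate_id, estimated_rewards, n_bootstrap_samples=1000, random_state=12345):
    """Resample whole slates with replacement and average the per-slate reward sums."""
    random_ = np.random.RandomState(random_state)
    unique_slate = np.unique(slate_id)
    boot_samples = []
    for _ in range(n_bootstrap_samples):
        sampled_slate = random_.choice(unique_slate, size=unique_slate.shape[0])
        # gather every slot-level record belonging to each sampled slate id
        sampled_rewards = np.concatenate(
            [estimated_rewards[slate_id == s] for s in sampled_slate]
        )
        boot_samples.append(sampled_rewards.sum() / unique_slate.shape[0])
    return np.mean(boot_samples), np.percentile(boot_samples, [2.5, 97.5])


# three slates of length 2; every slate sums to 1.0, so the bootstrap mean is exactly 1.0
slate_id = np.array([0, 0, 1, 1, 2, 2])
rewards = np.array([1.0, 0.0, 0.5, 0.5, 0.0, 1.0])
print(bootstrap_slate_mean(slate_id, rewards))
```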
641,006 | 11.04.2021 17:18:28 | -32,400 | c8df15c7026f93675c4dbf079cf798607c30c7cc | add comment of SIPS | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -14,7 +14,7 @@ from ..utils import check_confidence_interval_arguments\n@dataclass\nclass BaseSlateOffPolicyEstimator(metaclass=ABCMeta):\n- \"\"\"Base class for OPE estimators.\"\"\"\n+ \"\"\"Base class for Slate OPE estimators.\"\"\"\n@abstractmethod\ndef _estimate_round_rewards(self) -> np.ndarray:\n@@ -35,7 +35,31 @@ class BaseSlateOffPolicyEstimator(metaclass=ABCMeta):\n# TODO :comment\n@dataclass\nclass BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\n- \"\"\"Base Class of Slate Inverse Probability Weighting.\"\"\"\n+ \"\"\"Base Class of Slate Inverse Probability Weighting.\n+\n+ len_list: int (> 1)\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ position_weight: array-like, shape (len_list,)\n+ Non-negative weight for each slot.\n+\n+ \"\"\"\n+\n+ len_list: int\n+ position_weight: Optional[np.ndarray] = None\n+\n+ def __post_init__(self) -> None:\n+ if self.position_weight is None:\n+ self.position_weight = np.ones(self.len_list)\n+ else:\n+ if not isinstance(self.position_weight, np.ndarray):\n+ raise ValueError(\"position weight type\")\n+ if not (\n+ self.position_weight.ndim == 1\n+ and self.position_weight.shape[0] == self.len_list\n+ ):\n+ raise ValueError(\"position weight shape\")\ndef _estimate_round_rewards(\nself,\n@@ -46,52 +70,98 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nposition_weight: np.ndarray,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"Estimate rewards for each round.\n+ \"\"\"Estimate rewards for each round and slot.\nParameters\n----------\n- reward: array-like or Tensor, shape (n_rounds,)\n- Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n- action: array-like or Tensor, shape (n_rounds,)\n- Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n- pscore: array-like or Tensor, shape (n_rounds,)\n+ behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n- action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)\n- Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- position: array-like or Tensor, shape (n_rounds,), default=None\n- Positions of each round in the given logged bandit feedback.\n+ position_weight: array-like, shape (len_list,)\n+ Non-negative weight for each slot.\nReturns\n----------\n- estimated_rewards: array-like or Tensor, shape (n_rounds,)\n- Rewards estimated by IPW for each round.\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Rewards estimated by IPW for each round and slot (weighted based on position_weight).\n\"\"\"\nreward_weight = np.vectorize(lambda x: position_weight[x])(position)\niw = evaluation_policy_pscore / behavior_policy_pscore\n- return reward * iw * reward_weight\n+ estimated_rewards = reward * iw * reward_weight\n+ return 
estimated_rewards\n@staticmethod\ndef _extract_reward_by_bootstrap(\n- slate_id: np.ndarray, reward: np.ndarray, sampled_slate: np.ndarray\n+ slate_id: np.ndarray, estimated_rewards: np.ndarray, sampled_slate: np.ndarray\n) -> np.ndarray:\n- sampled_reward = list()\n+ \"\"\"Extract reward based on sampled slate.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Rewards estimated by IPW for each round and slot (weighted based on position_weight).\n+\n+ sampled_slate: array-like, shape (n_rounds,)\n+ Slate id sampled by bootstrap.\n+\n+ Returns\n+ ----------\n+ sampled_estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Estimated rewards sampled by bootstrap\n+\n+ \"\"\"\n+ sampled_estimated_rewards = list()\nfor slate in sampled_slate:\n- sampled_reward.extend(reward[slate_id == slate])\n- return np.array(sampled_reward)\n+ sampled_estimated_rewards.extend(estimated_rewards[slate_id == slate])\n+ sampled_estimated_rewards = np.array(sampled_estimated_rewards)\n+ return sampled_estimated_rewards\ndef _estimate_slate_confidence_interval_by_bootstrap(\nself,\nslate_id: np.ndarray,\n- reward: np.ndarray,\n+ estimated_rewards: np.ndarray,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval by nonparametric bootstrap-like procedure.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Rewards estimated by IPW for each round and slot (weighted based on position_weight).\n+\n+ alpha: float, default=0.05\n+ Significant level of confidence intervals.\n+\n+ n_bootstrap_samples: int, default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\ncheck_confidence_interval_arguments(\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\n@@ -103,10 +173,12 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nrandom_ = check_random_state(random_state)\nfor _ in np.arange(n_bootstrap_samples):\nsampled_slate = random_.choice(unique_slate, size=unique_slate.shape[0])\n- sampled_reward = self._extract_reward_by_bootstrap(\n- slate_id=slate_id, reward=reward, sampled_slate=sampled_slate\n+ sampled_estimated_rewards = self._extract_reward_by_bootstrap(\n+ slate_id=slate_id,\n+ estimated_rewards=estimated_rewards,\n+ sampled_slate=sampled_slate,\n)\n- boot_samples.append(sampled_reward.sum() / unique_slate.shape[0])\n+ boot_samples.append(sampled_estimated_rewards.sum() / unique_slate.shape[0])\nlower_bound = np.percentile(boot_samples, 100 * (alpha / 2))\nupper_bound = np.percentile(boot_samples, 100 * (1.0 - alpha / 2))\nreturn {\n@@ -118,23 +190,24 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\n@dataclass\nclass SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n- \"\"\"Estimate the policy value by Slate Standard Inverse Probability Scoring (SIPS).\"\"\"\n+ \"\"\"Estimate the policy value by Slate Standard Inverse 
Probability Scoring (SIPS).\n- len_list: int\n- estimator_name: str = \"sips\"\n- position_weight: Optional[np.ndarray] = None\n+ Note\n+ -------\n+ Slate Standard Inverse Probability Scoring (SIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e` by\n- def __post_init__(self) -> None:\n- if self.position_weight is None:\n- self.position_weight = np.ones(self.len_list)\n- else:\n- if not isinstance(self.position_weight, np.ndarray):\n- raise ValueError(\"position weight type\")\n- if not (\n- self.position_weight.ndim == 1\n- and self.position_weight.shape[0] == self.len_list\n- ):\n- raise ValueError(\"position weight shape\")\n+ Parameters\n+ ----------\n+ estimator_name: str, default='sips'.\n+ Name of off-policy estimator.\n+\n+ References\n+ ------------\n+ James McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Ben Carterette.\n+ \"Counterfactual Evaluation of Slate Recommendations with Sequential Reward Interactions\", 2020.\n+ \"\"\"\n+\n+ estimator_name: str = \"sips\"\ndef estimate_policy_value(\nself,\n@@ -144,6 +217,28 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nevaluation_policy_pscore: np.ndarray,\n**kwargs,\n) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ Returns\n+ ----------\n+ V_hat: float\n+ Estimated policy value (performance) of a given evaluation policy.\n+\n+ \"\"\"\nreturn self._estimate_round_rewards(\nreward=reward,\nposition=position,\n@@ -164,7 +259,41 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> Dict[str, float]:\n- reward = self._estimate_round_rewards(\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ alpha: float, default=0.05\n+ P-value.\n+\n+ n_bootstrap_samples: int, default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary 
storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ estimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore,\n@@ -172,7 +301,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nposition_weight=self.position_weight,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\n- slate_id=slate_id, reward=reward\n+ slate_id=slate_id, estimated_rewards=estimated_rewards\n)\n@@ -180,21 +309,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nclass SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\n\"\"\"Estimate the policy value by Slate Independent Inverse Probability Scoring (IIPS).\"\"\"\n- len_list: int\nestimator_name: str = \"iips\"\n- position_weight: Optional[np.ndarray] = None\n-\n- def __post_init__(self) -> None:\n- if self.position_weight is None:\n- self.position_weight = np.ones(self.len_list)\n- else:\n- if not isinstance(self.position_weight, np.ndarray):\n- raise ValueError(\"position weight type\")\n- if not (\n- self.position_weight.ndim == 1\n- and self.position_weight.shape[0] == self.len_list\n- ):\n- raise ValueError(\"position weight shape\")\ndef estimate_policy_value(\nself,\n@@ -224,7 +339,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> Dict[str, float]:\n- reward = self._estimate_round_rewards(\n+ estimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_item_position,\n@@ -232,7 +347,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nposition_weight=self.position_weight,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\n- slate_id=slate_id, reward=reward\n+ slate_id=slate_id, estimated_rewards=estimated_rewards\n)\n@@ -240,21 +355,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nclass SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\n\"\"\"Estimate the policy value by Slate Recursive Inverse Probability Scoring (RIPS).\"\"\"\n- len_list: int\nestimator_name: str = \"rips\"\n- position_weight: Optional[np.ndarray] = None\n-\n- def __post_init__(self) -> None:\n- if self.position_weight is None:\n- self.position_weight = np.ones(self.len_list)\n- else:\n- if not isinstance(self.position_weight, np.ndarray):\n- raise ValueError(\"position weight type\")\n- if not (\n- self.position_weight.ndim == 1\n- and self.position_weight.shape[0] == self.len_list\n- ):\n- raise ValueError(\"position weight shape\")\ndef estimate_policy_value(\nself,\n@@ -284,7 +385,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> Dict[str, float]:\n- reward = self._estimate_round_rewards(\n+ estimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_cascade,\n@@ -292,5 +393,5 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nposition_weight=self.position_weight,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\n- slate_id=slate_id, reward=reward\n+ slate_id=slate_id, estimated_rewards=estimated_rewards\n)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add comment of SIPS |
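The `Note` added to `SlateStandardIPS` breaks off after "by" without stating the estimator. Reading it off the `_estimate_round_rewards` / `estimate_policy_value` implementation in this diff, the quantity computed is presumably

$$\hat{V}_{\mathrm{SIPS}}(\pi_e; \mathcal{D}) = \frac{1}{|\mathcal{D}|} \sum_{(t,k) \in \mathcal{D}} \frac{\pi_e(\mathbf{a}_t \mid x_t)}{\pi_b(\mathbf{a}_t \mid x_t)} \, \alpha_k \, r_{t,k},$$

where the sum runs over all observed (round, slot) records, $\mathbf{a}_t$ is the whole slate shown in round $t$, $r_{t,k}$ is the reward observed at slot $k$, and $\alpha_k$ is the optional `position_weight` (all ones by default).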
641,005 | 12.04.2021 17:53:42 | -32,400 | 9220b587b0f91d221151ea463bf54baf3a7534af | fix type check issue in Windows | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -239,7 +239,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nraise ValueError(\n\"the size of axis 0 of context must be the same as that of action\"\n)\n- if not np.issubdtype(int, action.dtype):\n+ if not np.issubdtype(action.dtype, np.integer):\nraise ValueError(\"the dtype of action must be a subdtype of int\")\nexpected_reward_ = self.calc_expected_reward(context)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -173,7 +173,7 @@ def check_bandit_feedback_inputs(\nraise ValueError(\"reward must be ndarray\")\nif reward.ndim != 1:\nraise ValueError(\"reward must be 1-dimensional\")\n- if not (action.dtype == int and action.min() >= 0):\n+ if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\nraise ValueError(\"action elements must be non-negative integers\")\nif expected_reward is not None:\n@@ -219,7 +219,7 @@ def check_bandit_feedback_inputs(\nraise ValueError(\n\"context, action, reward, and position must be the same size.\"\n)\n- if not (position.dtype == int and position.min() >= 0):\n+ if not (np.issubdtype(position.dtype, np.integer) and position.min() >= 0):\nraise ValueError(\"position elements must be non-negative integers\")\nelse:\nif not (context.shape[0] == action.shape[0] == reward.shape[0]):\n@@ -287,7 +287,7 @@ def check_ope_inputs(\nraise ValueError(\n\"the first dimension of position and the first dimension of action_dist must be the same\"\n)\n- if not (position.dtype == int and position.min() >= 0):\n+ if not (np.issubdtype(position.dtype, np.integer) and position.min() >= 0):\nraise ValueError(\"position elements must be non-negative integers\")\nif position.max() >= action_dist.shape[2]:\nraise ValueError(\n@@ -319,7 +319,7 @@ def check_ope_inputs(\nraise ValueError(\"reward must be 1-dimensional\")\nif not (action.shape[0] == reward.shape[0]):\nraise ValueError(\"action and reward must be the same size.\")\n- if not (action.dtype == int and action.min() >= 0):\n+ if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\nraise ValueError(\"action elements must be non-negative integers\")\nif action.max() >= action_dist.shape[1]:\nraise ValueError(\n"
},
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -12,7 +12,7 @@ print(__version__)\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\nlong_description = f.read()\n-package_data_list = [\"obp/policy/conf/prior_bts.yaml\", \"obp/dataset/obd/\"]\n+package_data_list = [\"obp/policy/conf/prior_bts.yaml\", \"obp/dataset/obd\"]\nsetup(\nname=\"obp\",\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix type check issue in Windows |
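The reason `action.dtype == int` misbehaves on Windows: the equality only matches the platform's default integer dtype (C `long`, which is 32-bit on the Windows builds of NumPy in use here and 64-bit on most Linux builds), so a perfectly valid integer array can fail the check depending on the OS. `np.issubdtype(..., np.integer)` accepts every integer dtype. A quick illustration:

```python
import numpy as np

action_int32 = np.arange(5, dtype=np.int32)
action_int64 = np.arange(5, dtype=np.int64)

# `dtype == int` compares against the platform-default integer type,
# so integer arrays of the "other" width fail the check on that platform
print(action_int32.dtype == int, action_int64.dtype == int)

# the dtype-hierarchy check passes for any integer dtype on every platform
print(np.issubdtype(action_int32.dtype, np.integer))  # True
print(np.issubdtype(action_int64.dtype, np.integer))  # True
```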
641,006 | 17.04.2021 14:07:40 | -32,400 | 8f806155bf55df19401a98ba38b5b1d450c34e09 | apply review of synthetic_slate | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -31,7 +31,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nParameters\n-----------\nn_unique_action: int (>= len_list)\n- Number of actions.\n+ Number of unique actions.\nlen_list: int (> 1)\nLength of a list of actions recommended in each impression.\n@@ -47,7 +47,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nThe mean parameter of the reward distribution is determined by the `reward_function` specified by the next argument.\nreward_structure: str, default='cascade_additive'\n- Type of reward structure, which must be either 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', 'standard_exponential'.\n+ Type of reward structure, which must be one of 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', or 'standard_exponential'.\nWhen 'cascade_additive' or 'standard_additive' is given, additive action_interaction_matrix (:math:`W \\\\in \\\\mathbb{R}^{\\\\text{n_unique_action} \\\\times \\\\text{n_unique_action}}`) is generated.\nWhen 'cascade_exponential', 'standard_exponential', or 'independent' is given, exponential action_interaction_matrix (:math:`\\\\in \\\\mathbb{R}^{\\\\text{len_list} \\\\times \\\\text{len_list}}`) is generated.\nExpected reward is calculated as follows (:math:`f` is a base reward function of each item-position, and :math:`g` is a transform function):\n@@ -60,7 +60,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nWhen reward_type is 'binray', transform function is the logit function.\nclick_model: str, default=None\n- Type of click model, which must be either None, 'pbm', 'cascade'.\n+ Type of click model, which must be one of None, 'pbm', or 'cascade'.\nWhen None is given, reward of each slot is sampled based on the expected reward of the slot.\nWhen 'pbm' is given, reward of each slot is sampled based on the position-based model.\nWhen 'cascade' is given, reward of each slot is sampled based on the cascade model.\n@@ -87,7 +87,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n.. 
code-block:: python\n- >>> import numpy as np\n>>> from obp.dataset import (\nlogistic_reward_function,\nlinear_behavior_policy_logit,\n@@ -98,12 +97,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n>>> dataset = SyntheticSlateBanditDataset(\nn_unique_action=10,\ndim_context=5,\n+ len_list=3,\nbase_reward_function=logistic_reward_function,\n- behavior_policy_function=linear_behavior_policy,\n+ behavior_policy_function=linear_behavior_policy_logit,\nreward_type='binary',\nreward_structure='cascade_additive',\nclick_model='cascade',\n- exam_weight=None,\nrandom_state=12345\n)\n>>> bandit_feedback = dataset.obtain_batch_bandit_feedback(\n@@ -154,7 +153,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreward_type: str = \"binary\"\nreward_structure: str = \"cascade_additive\"\nclick_model: Optional[str] = None\n- exam_weight: Optional[np.ndarray] = None\nbase_reward_function: Optional[\nCallable[\n[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n@@ -202,11 +200,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n\"standard_exponential\",\n]:\nraise ValueError(\n- f\"reward_structure must be either 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', or 'standard_exponential', but {self.reward_structure} is given.\"\n+ f\"reward_structure must be one of 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', or 'standard_exponential', but {self.reward_structure} is given.\"\n)\nif self.click_model not in [\"cascade\", \"pbm\", None]:\nraise ValueError(\n- f\"click_model must be either 'cascade', 'pbm', or None, but {self.click_model} is given.\"\n+ f\"click_model must be one of 'cascade', 'pbm', or None, but {self.click_model} is given.\"\n)\n# set exam_weight (slot-level examination probability).\n# When click_model is 'pbm', exam_weight is :math:`1 / k`, where :math:`k` is the position.\n@@ -231,11 +229,15 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# generate exponential action interaction matrix of (len_list, len_list)\nif self.reward_structure == \"standard_exponential\":\nself.action_interaction_matrix = (\n- self.obtain_standard_exponential_slot_weight(self.len_list)\n+ self.obtain_standard_exponential_action_interaction_matrix(\n+ self.len_list\n+ )\n)\nelif self.reward_structure == \"cascade_exponential\":\nself.action_interaction_matrix = (\n- self.obtain_cascade_exponential_slot_weight(self.len_list)\n+ self.obtain_cascade_exponential_action_interaction_matrix(\n+ self.len_list\n+ )\n)\nelse:\nself.action_interaction_matrix = np.identity(self.len_list)\n@@ -249,8 +251,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself.action_context = np.eye(self.n_unique_action, dtype=int)\n@staticmethod\n- def obtain_standard_exponential_slot_weight(len_list):\n- \"\"\"Obtain slot weight matrix for standard exponential reward structure (symmetric matrix)\"\"\"\n+ def obtain_standard_exponential_action_interaction_matrix(len_list) -> np.ndarray:\n+ \"\"\"Obtain action interaction matrix for standard exponential reward structure (symmetric matrix)\"\"\"\naction_interaction_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\naction_interaction_matrix[:, position_] = -1 / np.exp(\n@@ -260,8 +262,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn action_interaction_matrix\n@staticmethod\n- def obtain_cascade_exponential_slot_weight(len_list):\n- \"\"\"Obtain slot weight matrix for cascade exponential reward structure (upper triangular 
matrix)\"\"\"\n+ def obtain_cascade_exponential_action_interaction_matrix(len_list) -> np.ndarray:\n+ \"\"\"Obtain action interaction matrix for cascade exponential reward structure (upper triangular matrix)\"\"\"\naction_interaction_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\naction_interaction_matrix[:, position_] = -1 / np.exp(\n@@ -411,7 +413,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreward: array-like, shape (n_unique_action, len_list)\n\"\"\"\n- expected_reward_factual = expected_reward_factual * self.exam_weight\n+ expected_reward_factual *= self.exam_weight\nif self.reward_type == \"binary\":\nreward = np.array(\n[\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | apply review of synthetic_slate |
641,006 | 17.04.2021 14:48:35 | -32,400 | 8066412f7ac18e8d913fdb29c0424cc8defa5f69 | fix bugs; apply review of test_synthetic_slate | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -34,7 +34,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nNumber of unique actions.\nlen_list: int (> 1)\n- Length of a list of actions recommended in each impression.\n+ Length of a list of actions recommended in each slate.\nWhen Open Bandit Dataset is used, 3 should be set.\ndim_context: int, default=1\n@@ -209,9 +209,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# set exam_weight (slot-level examination probability).\n# When click_model is 'pbm', exam_weight is :math:`1 / k`, where :math:`k` is the position.\nif self.click_model == \"pbm\":\n- self.exam_weight = 1 / np.arange(1, self.len_list + 1)\n+ self.exam_weight = 1.0 / np.arange(1, self.len_list + 1)\nelse:\n- self.exam_weight = np.ones(self.len_list)\n+ self.exam_weight = np.ones(self.len_list, dtype=float)\nif self.click_model is not None and self.reward_type == \"continuous\":\nraise ValueError(\n\"continuous reward type is unavailable when click model is given\"\n@@ -638,7 +638,7 @@ def action_interaction_additive_reward_function(\n`W(i, j)` is the interaction term between action `i` and `j`.\nlen_list: int (> 1)\n- Length of a list of actions recommended in each impression.\n+ Length of a list of actions recommended in each slate.\nWhen Open Bandit Dataset is used, 3 should be set.\nis_cascade: bool\n@@ -693,7 +693,7 @@ def action_interaction_additive_reward_function(\n)\nif reward_type == \"binary\":\nexpected_reward = np.log(expected_reward / (1 - expected_reward))\n- expected_reward_factual = np.zeros_like(action_2d)\n+ expected_reward_factual = np.zeros_like(action_2d, dtype=float)\nfor position_ in np.arange(len_list):\ntmp_fixed_reward = expected_reward[\nnp.arange(context.shape[0]), action_2d[:, position_]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -86,8 +86,8 @@ invalid_input_of_init = [\n\"dim_context must be a positive integer\",\n),\n(5, 3, 2, \"aaa\", \"independent\", \"pbm\", 1, \"reward_type must be either\"),\n- (5, 3, 2, \"binary\", \"aaa\", \"pbm\", 1, \"reward_structure must be either\"),\n- (5, 3, 2, \"binary\", \"independent\", \"aaa\", 1, \"click_model must be either\"),\n+ (5, 3, 2, \"binary\", \"aaa\", \"pbm\", 1, \"reward_structure must be one of\"),\n+ (5, 3, 2, \"binary\", \"independent\", \"aaa\", 1, \"click_model must be one of\"),\n(5, 3, 2, \"binary\", \"independent\", \"pbm\", \"x\", \"random_state must be an integer\"),\n(5, 3, 2, \"binary\", \"independent\", \"pbm\", None, \"random_state must be an integer\"),\n]\n@@ -134,7 +134,7 @@ def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):\npscore_columns.append(column)\nassert (\nlen(pscore_columns) > 0\n- ), f\"bandit feedback must contains at least one of the following pscore columns: {pscore_candidate_columns}\"\n+ ), f\"bandit feedback must contain at least one of the following pscore columns: {pscore_candidate_columns}\"\nbandit_feedback_df = pd.DataFrame()\nfor column in [\"slate_id\", \"position\", \"action\"] + pscore_columns:\nbandit_feedback_df[column] = bandit_feedback[column]\n@@ -147,10 +147,10 @@ def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):\n# check uniqueness\nassert (\nbandit_feedback_df.duplicated([\"slate_id\", \"position\"]).sum() == 0\n- ), \"position must not be duplicated in each impression\"\n+ ), \"position must not be duplicated in each slate\"\nassert (\nbandit_feedback_df.duplicated([\"slate_id\", \"action\"]).sum() == 0\n- ), \"action must not be duplicated in each impression\"\n+ ), \"action must not be duplicated in each slate\"\n# check pscores\nfor column in pscore_columns:\ninvalid_pscore_flgs = (bandit_feedback_df[column] < 0) | (\n@@ -160,16 +160,18 @@ def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):\nif \"pscore_cascade\" in pscore_columns and \"pscore\" in pscore_columns:\nassert (\nbandit_feedback_df[\"pscore_cascade\"] < bandit_feedback_df[\"pscore\"]\n- ).sum() == 0, \"pscore_cascade is smaller or equal to pscore\"\n+ ).sum() == 0, \"pscore_cascade must be larger than or equal to pscore\"\nif \"pscore_item_position\" in pscore_columns and \"pscore\" in pscore_columns:\nassert (\nbandit_feedback_df[\"pscore_item_position\"] < bandit_feedback_df[\"pscore\"]\n- ).sum() == 0, \"pscore is smaller or equal to pscore_item_position\"\n+ ).sum() == 0, \"pscore must be larger than or equal to pscore_item_position\"\nif \"pscore_item_position\" in pscore_columns and \"pscore_cascade\" in pscore_columns:\nassert (\nbandit_feedback_df[\"pscore_item_position\"]\n< bandit_feedback_df[\"pscore_cascade\"]\n- ).sum() == 0, \"pscore_cascade is smaller or equal to pscore_item_position\"\n+ ).sum() == 0, (\n+ \"pscore_cascade must be larger than or equal to pscore_item_position\"\n+ )\nif \"pscore_cascade\" in pscore_columns:\nprevious_minimum_pscore_cascade = (\nbandit_feedback_df.groupby(\"slate_id\")[\"pscore_cascade\"]\n@@ -179,14 +181,14 @@ def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):\n)\nassert (\nprevious_minimum_pscore_cascade < bandit_feedback_df[\"pscore_cascade\"]\n- ).sum() == 0, \"pscore_cascade must be non-decresing sequence in each impression\"\n+ ).sum() == 0, \"pscore_cascade must be non-decresing sequence in each slate\"\nif \"pscore\" in pscore_columns:\ncount_pscore_in_expression = bandit_feedback_df.groupby(\"slate_id\").apply(\nlambda x: 
x[\"pscore\"].unique().shape[0]\n)\nassert (\ncount_pscore_in_expression != 1\n- ).sum() == 0, \"pscore must be unique in each impression\"\n+ ).sum() == 0, \"pscore must be unique in each slate\"\nif \"pscore\" in pscore_columns and \"pscore_cascade\" in pscore_columns:\nlast_slot_feedback_df = bandit_feedback_df.drop_duplicates(\n\"slate_id\", keep=\"last\"\n@@ -239,7 +241,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behav\n), f\"pscore_cascade must be {pscore_cascade} for all impresessions\"\nassert np.allclose(\nbandit_feedback_df[\"pscore\"].unique(), [pscore_above]\n- ), f\"pscore must be {pscore_above} for all impressions\"\n+ ), f\"pscore must be {pscore_above} for all slates\"\ndef test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy_largescale():\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix bugs; apply review of test_synthetic_slate |
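One way to read the pscore checks exercised above: when a behavior policy samples a ranked list, the joint probability of the whole slate can never exceed the probability of any of its prefixes (pscore_cascade) or of a single slot taken on its own (pscore_item_position). A minimal numeric sketch of that ordering, assuming a hypothetical two-slot slate drawn uniformly without replacement from three unique actions (the sizes and the policy are illustrative, not taken from the tests):

# Uniform random policy over 3 unique actions, slate length 2, no repeated actions.
n_unique_action = 3
pscore_item_position = 1 / n_unique_action                              # P(a_k) = 1/3 at either slot
pscore_cascade_slot_1 = 1 / n_unique_action                             # P(a_1) = 1/3
pscore_cascade_slot_2 = 1 / (n_unique_action * (n_unique_action - 1))   # P(a_1, a_2) = 1/6
pscore = pscore_cascade_slot_2                                          # joint probability of the full slate

# The ordering holds slot by slot: joint <= prefix <= single-slot marginal.
assert pscore <= pscore_cascade_slot_1 <= pscore_item_position
assert pscore <= pscore_cascade_slot_2 <= pscore_item_position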
641,006 | 19.04.2021 02:23:51 | -32,400 | 88ff2a9dfd63e1340fe78cb6b570f4382e5739a0 | fix slate ope estimators and add validations | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -9,7 +9,12 @@ from typing import Dict, Optional\nimport numpy as np\nfrom sklearn.utils import check_random_state\n-from ..utils import check_confidence_interval_arguments\n+from ..utils import (\n+ check_confidence_interval_arguments,\n+ check_sips_ope_inputs,\n+ check_iips_ope_inputs,\n+ check_rips_ope_inputs,\n+)\n@dataclass\n@@ -41,25 +46,9 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nLength of a list of actions recommended in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- position_weight: array-like, shape (len_list,)\n- Non-negative weight for each slot.\n-\n\"\"\"\nlen_list: int\n- position_weight: Optional[np.ndarray] = None\n-\n- def __post_init__(self) -> None:\n- if self.position_weight is None:\n- self.position_weight = np.ones(self.len_list)\n- else:\n- if not isinstance(self.position_weight, np.ndarray):\n- raise ValueError(\"position weight type\")\n- if not (\n- self.position_weight.ndim == 1\n- and self.position_weight.shape[0] == self.len_list\n- ):\n- raise ValueError(\"position weight shape\")\ndef _estimate_round_rewards(\nself,\n@@ -67,7 +56,6 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nposition: np.ndarray,\nbehavior_policy_pscore: np.ndarray,\nevaluation_policy_pscore: np.ndarray,\n- position_weight: np.ndarray,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate rewards for each round and slot.\n@@ -86,18 +74,14 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nevaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- position_weight: array-like, shape (len_list,)\n- Non-negative weight for each slot.\n-\nReturns\n----------\nestimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW for each round and slot (weighted based on position_weight).\n+ Rewards estimated by IPW for each round and slot.\n\"\"\"\n- reward_weight = np.vectorize(lambda x: position_weight[x])(position)\niw = evaluation_policy_pscore / behavior_policy_pscore\n- estimated_rewards = reward * iw * reward_weight\n+ estimated_rewards = reward * iw\nreturn estimated_rewards\n@staticmethod\n@@ -112,7 +96,7 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nSlate id observed in each round of the logged bandit feedback.\nestimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW for each round and slot (weighted based on position_weight).\n+ Rewards estimated by IPW for each round and slot.\nsampled_slate: array-like, shape (n_rounds,)\nSlate id sampled by bootstrap.\n@@ -145,7 +129,7 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nSlate id observed in each round of the logged bandit feedback.\nestimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW for each round and slot (weighted based on position_weight).\n+ Rewards estimated by IPW for each round and slot.\nalpha: float, default=0.05\nSignificant level of confidence intervals.\n@@ -190,11 +174,11 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\n@dataclass\nclass SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n- \"\"\"Estimate the policy value by Slate Standard Inverse Probability Scoring (SIPS).\n+ \"\"\"Estimate the policy value by Slate Standard Inverse Propensity Scoring (SIPS).\nNote\n-------\n- Slate Standard 
Inverse Probability Scoring (SIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e` by\n+ Slate Standard Inverse Propensity Scoring (SIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e`.\nParameters\n----------\n@@ -205,12 +189,14 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n------------\nJames McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Ben Carterette.\n\"Counterfactual Evaluation of Slate Recommendations with Sequential Reward Interactions\", 2020.\n+\n\"\"\"\nestimator_name: str = \"sips\"\ndef estimate_policy_value(\nself,\n+ slate_id: np.ndarray,\nreward: np.ndarray,\nposition: np.ndarray,\npscore: np.ndarray,\n@@ -221,6 +207,9 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\nreward: array-like, shape (<= n_rounds * len_list,)\nReward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n@@ -229,6 +218,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\npscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+ This parameter must be unique in each slate.\nevaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n@@ -239,12 +229,18 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n+ check_sips_ope_inputs(\n+ slate_id=slate_id,\n+ reward=reward,\n+ position=position,\n+ pscore=pscore,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\nreturn self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- position_weight=self.position_weight,\n).mean()\ndef estimate_interval(\n@@ -274,6 +270,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\npscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+ This parameter must be unique in each slate.\nevaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n@@ -293,38 +290,96 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n+ check_sips_ope_inputs(\n+ slate_id=slate_id,\n+ reward=reward,\n+ position=position,\n+ pscore=pscore,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\nestimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- position_weight=self.position_weight,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\n- slate_id=slate_id, estimated_rewards=estimated_rewards\n+ slate_id=slate_id,\n+ estimated_rewards=estimated_rewards,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n)\n@dataclass\nclass SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\n- \"\"\"Estimate the policy value by Slate Independent Inverse 
Probability Scoring (IIPS).\"\"\"\n+ \"\"\"Estimate the policy value by Slate Independent Inverse Propensity Scoring (IIPS).\n+\n+ Note\n+ -------\n+ Slate Independent Inverse Propensity Scoring (IIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e`.\n+\n+ Parameters\n+ ----------\n+ estimator_name: str, default='iips'.\n+ Name of off-policy estimator.\n+\n+ References\n+ ------------\n+ Shuai Li, Yasin Abbasi-Yadkori, Branislav Kveton, S. Muthukrishnan, Vishwa Vinay, Zheng Wen.\n+ \"Offline Evaluation of Ranking Policies with Click Models\", 2018.\n+\n+ \"\"\"\nestimator_name: str = \"iips\"\ndef estimate_policy_value(\nself,\n+ slate_id: np.ndarray,\nreward: np.ndarray,\nposition: np.ndarray,\npscore_item_position: np.ndarray,\nevaluation_policy_pscore: np.ndarray,\n**kwargs,\n) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore_item_position: array-like, shape (<= n_rounds * len_list,)\n+ Marginal action choice probabilities of the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_{t, k}|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Marginal action choice probabilities of the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_{t, k}|x_t)`.\n+\n+ Returns\n+ ----------\n+ V_hat: float\n+ Estimated policy value (performance) of a given evaluation policy.\n+\n+ \"\"\"\n+ check_iips_ope_inputs(\n+ slate_id=slate_id,\n+ reward=reward,\n+ position=position,\n+ pscore_item_position=pscore_item_position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\nreturn self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_item_position,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- position_weight=self.position_weight,\n).mean()\ndef estimate_interval(\n@@ -339,38 +394,131 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore_item_position: array-like, shape (<= n_rounds * len_list,)\n+ Marginal action choice probabilities of the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_{t, k}|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Marginal action choice probabilities of the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_{t, k}|x_t)`.\n+\n+ alpha: float, default=0.05\n+ P-value.\n+\n+ n_bootstrap_samples: int, 
default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ check_iips_ope_inputs(\n+ slate_id=slate_id,\n+ reward=reward,\n+ position=position,\n+ pscore_item_position=pscore_item_position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\nestimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_item_position,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- position_weight=self.position_weight,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\n- slate_id=slate_id, estimated_rewards=estimated_rewards\n+ slate_id=slate_id,\n+ estimated_rewards=estimated_rewards,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n)\n@dataclass\nclass SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\n- \"\"\"Estimate the policy value by Slate Recursive Inverse Probability Scoring (RIPS).\"\"\"\n+ \"\"\"Estimate the policy value by Slate Recursive Inverse Propensity Scoring (RIPS).\n+\n+ Note\n+ -------\n+ Slate Recursive Inverse Propensity Scoring (RIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e`.\n+\n+ Parameters\n+ ----------\n+ estimator_name: str, default='rips'.\n+ Name of off-policy estimator.\n+\n+ References\n+ ------------\n+ James McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Ben Carterette.\n+ \"Counterfactual Evaluation of Slate Recommendations with Sequential Reward Interactions\", 2020.\n+\n+ \"\"\"\nestimator_name: str = \"rips\"\ndef estimate_policy_value(\nself,\n+ slate_id: np.ndarray,\nreward: np.ndarray,\nposition: np.ndarray,\npscore_cascade: np.ndarray,\nevaluation_policy_pscore: np.ndarray,\n**kwargs,\n) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore_cascade: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities above the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities above the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n+\n+ Returns\n+ ----------\n+ V_hat: float\n+ Estimated policy value (performance) of a given evaluation policy.\n+\n+ \"\"\"\n+\n+ check_rips_ope_inputs(\n+ slate_id=slate_id,\n+ reward=reward,\n+ position=position,\n+ pscore_cascade=pscore_cascade,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\nreturn self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_cascade,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- position_weight=self.position_weight,\n).mean()\ndef estimate_interval(\n@@ -385,13 +533,57 @@ class 
SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Parameters\n+ ----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore_cascade: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities above the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities above the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n+\n+ alpha: float, default=0.05\n+ P-value.\n+\n+ n_bootstrap_samples: int, default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ check_rips_ope_inputs(\n+ slate_id=slate_id,\n+ reward=reward,\n+ position=position,\n+ pscore_cascade=pscore_cascade,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\nestimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_cascade,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- position_weight=self.position_weight,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\n- slate_id=slate_id, estimated_rewards=estimated_rewards\n+ slate_id=slate_id,\n+ estimated_rewards=estimated_rewards,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "from typing import Dict, Optional, Union\nimport numpy as np\n+import pandas as pd\nfrom sklearn.utils import check_random_state\nimport torch\n@@ -243,7 +244,7 @@ def check_ope_inputs(\npscore: Optional[np.ndarray] = None,\nestimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n) -> Optional[ValueError]:\n- \"\"\"Check inputs for bandit learning or simulation.\n+ \"\"\"Check inputs for ope.\nParameters\n-----------\n@@ -326,7 +327,7 @@ def check_ope_inputs(\n\"action elements must be smaller than the second dimension of action_dist\"\n)\n- # pscpre\n+ # pscore\nif pscore is not None:\nif not isinstance(pscore, np.ndarray):\nraise ValueError(\"pscore must be ndarray\")\n@@ -338,6 +339,287 @@ def check_ope_inputs(\nraise ValueError(\"pscore must be positive\")\n+def check_sips_ope_inputs(\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for sips ope.\n+\n+ Parameters\n+ -----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ \"\"\"\n+ # position\n+ if not isinstance(position, np.ndarray):\n+ raise ValueError(\"position must be ndarray\")\n+ if position.ndim != 1:\n+ raise ValueError(\"position must be 1-dimensional\")\n+ if not (position.dtype == int and position.min() >= 0):\n+ raise ValueError(\"position elements must be non-negative integers\")\n+\n+ # reward\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional\")\n+\n+ # pscore\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+ if pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional\")\n+ if np.any(pscore <= 0) or np.any(pscore > 1):\n+ raise ValueError(\"pscore must be in the range of (0, 1]\")\n+\n+ # evaluation_policy_pscore\n+ if not isinstance(evaluation_policy_pscore, np.ndarray):\n+ raise ValueError(\"evaluation_policy_pscore must be ndarray\")\n+ if evaluation_policy_pscore.ndim != 1:\n+ raise ValueError(\"evaluation_policy_pscore must be 1-dimensional\")\n+ if np.any(evaluation_policy_pscore <= 0) or np.any(evaluation_policy_pscore > 1):\n+ raise ValueError(\"evaluation_policy_pscore must be in the range of (0, 1]\")\n+\n+ # slate id\n+ if not isinstance(slate_id, np.ndarray):\n+ raise ValueError(\"slate_id must be ndarray\")\n+ if slate_id.ndim != 1:\n+ raise ValueError(\"slate_id must be 1-dimensional\")\n+ if not (slate_id.dtype == int and slate_id.min() >= 0):\n+ raise ValueError(\"slate_id elements must be non-negative integers\")\n+ if not (\n+ slate_id.shape[0]\n+ == position.shape[0]\n+ == reward.shape[0]\n+ == pscore.shape[0]\n+ == evaluation_policy_pscore.shape[0]\n+ ):\n+ raise 
ValueError(\n+ \"slate_id, position, reward, pscore, and evaluation_policy_pscore must be the same size.\"\n+ )\n+\n+ bandit_feedback_df = pd.DataFrame()\n+ bandit_feedback_df[\"slate_id\"] = slate_id\n+ bandit_feedback_df[\"reward\"] = reward\n+ bandit_feedback_df[\"position\"] = position\n+ bandit_feedback_df[\"pscore\"] = pscore\n+ bandit_feedback_df[\"evaluation_policy_pscore\"] = evaluation_policy_pscore\n+ # check uniqueness\n+ if bandit_feedback_df.duplicated([\"slate_id\", \"position\"]).sum() > 0:\n+ raise ValueError(\"position must not be duplicated in each slate\")\n+ # check pscore uniqueness\n+ distinct_count_pscore_in_slate = bandit_feedback_df.groupby(\"slate_id\").apply(\n+ lambda x: x[\"pscore\"].unique().shape[0]\n+ )\n+ if (distinct_count_pscore_in_slate != 1).sum() > 0:\n+ raise ValueError(\"pscore must be unique in each slate\")\n+\n+\n+def check_iips_ope_inputs(\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore_item_position: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for sips ope.\n+\n+ Parameters\n+ -----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore_item_position: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ \"\"\"\n+ # position\n+ if not isinstance(position, np.ndarray):\n+ raise ValueError(\"position must be ndarray\")\n+ if position.ndim != 1:\n+ raise ValueError(\"position must be 1-dimensional\")\n+ if not (position.dtype == int and position.min() >= 0):\n+ raise ValueError(\"position elements must be non-negative integers\")\n+\n+ # reward\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional\")\n+\n+ # pscore_item_position\n+ if not isinstance(pscore_item_position, np.ndarray):\n+ raise ValueError(\"pscore_item_position must be ndarray\")\n+ if pscore_item_position.ndim != 1:\n+ raise ValueError(\"pscore_item_position must be 1-dimensional\")\n+ if np.any(pscore_item_position <= 0) or np.any(pscore_item_position > 1):\n+ raise ValueError(\"pscore_item_position must be in the range of (0, 1]\")\n+\n+ # evaluation_policy_pscore\n+ if not isinstance(evaluation_policy_pscore, np.ndarray):\n+ raise ValueError(\"evaluation_policy_pscore must be ndarray\")\n+ if evaluation_policy_pscore.ndim != 1:\n+ raise ValueError(\"evaluation_policy_pscore must be 1-dimensional\")\n+ if np.any(evaluation_policy_pscore <= 0) or np.any(evaluation_policy_pscore > 1):\n+ raise ValueError(\"evaluation_policy_pscore must be in the range of (0, 1]\")\n+\n+ # slate id\n+ if not isinstance(slate_id, np.ndarray):\n+ raise ValueError(\"slate_id must be ndarray\")\n+ if slate_id.ndim != 1:\n+ raise ValueError(\"slate_id must be 1-dimensional\")\n+ if not (slate_id.dtype == int and slate_id.min() >= 0):\n+ raise 
ValueError(\"slate_id elements must be non-negative integers\")\n+ if not (\n+ slate_id.shape[0]\n+ == position.shape[0]\n+ == reward.shape[0]\n+ == pscore_item_position.shape[0]\n+ == evaluation_policy_pscore.shape[0]\n+ ):\n+ raise ValueError(\n+ \"slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore must be the same size.\"\n+ )\n+\n+ bandit_feedback_df = pd.DataFrame()\n+ bandit_feedback_df[\"slate_id\"] = slate_id\n+ bandit_feedback_df[\"reward\"] = reward\n+ bandit_feedback_df[\"position\"] = position\n+ bandit_feedback_df[\"pscore_item_position\"] = pscore_item_position\n+ bandit_feedback_df[\"evaluation_policy_pscore\"] = evaluation_policy_pscore\n+ # check uniqueness\n+ if bandit_feedback_df.duplicated([\"slate_id\", \"position\"]).sum() > 0:\n+ raise ValueError(\"position must not be duplicated in each slate\")\n+\n+\n+def check_rips_ope_inputs(\n+ slate_id: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ pscore_cascade: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for sips ope.\n+\n+ Parameters\n+ -----------\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ Positions of each round and slot in the given logged bandit feedback.\n+\n+ pscore_cascade: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ \"\"\"\n+ # position\n+ if not isinstance(position, np.ndarray):\n+ raise ValueError(\"position must be ndarray\")\n+ if position.ndim != 1:\n+ raise ValueError(\"position must be 1-dimensional\")\n+ if not (position.dtype == int and position.min() >= 0):\n+ raise ValueError(\"position elements must be non-negative integers\")\n+\n+ # reward\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional\")\n+\n+ # pscore_cascade\n+ if not isinstance(pscore_cascade, np.ndarray):\n+ raise ValueError(\"pscore_cascade must be ndarray\")\n+ if pscore_cascade.ndim != 1:\n+ raise ValueError(\"pscore_cascade must be 1-dimensional\")\n+ if np.any(pscore_cascade <= 0) or np.any(pscore_cascade > 1):\n+ raise ValueError(\"pscore_cascade must be in the range of (0, 1]\")\n+\n+ # evaluation_policy_pscore\n+ if not isinstance(evaluation_policy_pscore, np.ndarray):\n+ raise ValueError(\"evaluation_policy_pscore must be ndarray\")\n+ if evaluation_policy_pscore.ndim != 1:\n+ raise ValueError(\"evaluation_policy_pscore must be 1-dimensional\")\n+ if np.any(evaluation_policy_pscore <= 0) or np.any(evaluation_policy_pscore > 1):\n+ raise ValueError(\"evaluation_policy_pscore must be in the range of (0, 1]\")\n+\n+ # slate id\n+ if not isinstance(slate_id, np.ndarray):\n+ raise ValueError(\"slate_id must be ndarray\")\n+ if slate_id.ndim != 1:\n+ raise ValueError(\"slate_id must be 1-dimensional\")\n+ if not (slate_id.dtype == int and slate_id.min() >= 0):\n+ raise ValueError(\"slate_id elements must be non-negative integers\")\n+ if 
not (\n+ slate_id.shape[0]\n+ == position.shape[0]\n+ == reward.shape[0]\n+ == pscore_cascade.shape[0]\n+ == evaluation_policy_pscore.shape[0]\n+ ):\n+ raise ValueError(\n+ \"slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore must be the same size.\"\n+ )\n+\n+ bandit_feedback_df = pd.DataFrame()\n+ bandit_feedback_df[\"slate_id\"] = slate_id\n+ bandit_feedback_df[\"reward\"] = reward\n+ bandit_feedback_df[\"position\"] = position\n+ bandit_feedback_df[\"pscore_cascade\"] = pscore_cascade\n+ bandit_feedback_df[\"evaluation_policy_pscore\"] = evaluation_policy_pscore\n+ # sort dataframe\n+ bandit_feedback_df = (\n+ bandit_feedback_df.sort_values([\"slate_id\", \"position\"])\n+ .reset_index(drop=True)\n+ .copy()\n+ )\n+ # check uniqueness\n+ if bandit_feedback_df.duplicated([\"slate_id\", \"position\"]).sum() > 0:\n+ raise ValueError(\"position must not be duplicated in each slate\")\n+ # check pscore_cascade structure\n+ previous_minimum_pscore_cascade = (\n+ bandit_feedback_df.groupby(\"slate_id\")[\"pscore_cascade\"]\n+ .expanding()\n+ .min()\n+ .values\n+ )\n+ if (\n+ previous_minimum_pscore_cascade < bandit_feedback_df[\"pscore_cascade\"]\n+ ).sum() > 0:\n+ raise ValueError(\"pscore_cascade must be non-increasing sequence in each slate\")\n+\n+\ndef check_ope_inputs_tensor(\naction_dist: torch.Tensor,\nposition: Optional[torch.Tensor] = None,\n@@ -432,7 +714,7 @@ def check_ope_inputs_tensor(\n\"action elements must be smaller than the second dimension of action_dist\"\n)\n- # pscpre\n+ # pscore\nif pscore is not None:\nif not isinstance(pscore, torch.Tensor):\nraise ValueError(\"pscore must be Tensor\")\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix slate ope estimators and add validations |
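All three estimators touched in this commit share one computational core: per-slot rewards are reweighted by the ratio of evaluation-policy to behavior-policy propensities, and the estimate is the mean over all slate-slot rows. They differ only in which propensity is plugged in (the slate-level pscore for SIPS, the per-slot marginal pscore_item_position for IIPS, the prefix pscore_cascade for RIPS). A rough numpy sketch of that shared step, with made-up toy arrays for two slates of length three:

import numpy as np

# Illustrative per-slot rewards and propensities (values are placeholders).
reward = np.array([0, 1, 0, 1, 1, 0])
behavior_policy_pscore = np.array([0.2, 0.2, 0.2, 0.1, 0.1, 0.1])
evaluation_policy_pscore = np.array([0.3, 0.3, 0.3, 0.05, 0.05, 0.05])

iw = evaluation_policy_pscore / behavior_policy_pscore   # importance weights
estimated_rewards = reward * iw                           # what _estimate_round_rewards computes
print(estimated_rewards.mean())                           # what estimate_policy_value returns

Roughly speaking, SIPS pays the highest variance for the weakest assumptions, IIPS gets the lowest variance by assuming rewards depend only on the item in each slot, and RIPS sits in between under a cascade-style assumption; that trade-off is the subject of the references cited in the docstrings.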
641,006 | 19.04.2021 02:24:05 | -32,400 | 59b7e95144225e2a4a2e7f729a0be74d5b6e23d4 | add slate ope tests | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_ipw_estimators_slate.py",
"diff": "+import pytest\n+import numpy as np\n+\n+from obp.ope import SlateStandardIPS, SlateIndependentIPS, SlateRecursiveIPS\n+\n+# setting\n+len_list = 3\n+sips = SlateStandardIPS(len_list=len_list)\n+iips = SlateIndependentIPS(len_list=len_list)\n+rips = SlateRecursiveIPS(len_list=len_list)\n+n_rounds = 5\n+\n+\n+# --- _extract_reward_by_bootstrap ---\n+def test_extract_reward_by_bootstrap() -> None:\n+ slate_id = np.repeat(np.arange(5), 3)\n+ estimated_rewards = np.random.normal(size=n_rounds * len_list)\n+ sampled_slate = np.array([0, 3, 0])\n+ sampled_estimated_rewards = sips._extract_reward_by_bootstrap(\n+ slate_id=slate_id,\n+ estimated_rewards=estimated_rewards,\n+ sampled_slate=sampled_slate,\n+ )\n+ correct_sampled_estimated_rewards = np.hstack(\n+ [estimated_rewards[0:3], estimated_rewards[9:12], estimated_rewards[0:3]]\n+ )\n+ assert np.allclose(sampled_estimated_rewards, correct_sampled_estimated_rewards)\n+\n+\n+# --- invalid (all slate estimators) ---\n+\n+# slate_id, reward, pscore, position, evaluation_policy_pscore, description\n+invalid_input_of_slate_estimators = [\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ \"4\", #\n+ np.ones(n_rounds * len_list),\n+ \"position must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds).reshape((n_rounds, len_list)), #\n+ np.ones(n_rounds * len_list),\n+ \"position must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds) - 1, #\n+ np.ones(n_rounds * len_list),\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ \"4\", #\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros((n_rounds, len_list), dtype=int), #\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"reward must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ \"4\", #\n+ \"evaluation_policy_pscore must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones((n_rounds, len_list)), #\n+ \"evaluation_policy_pscore must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) + 1, #\n+ \"evaluation_policy_pscore must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) - 1, #\n+ \"evaluation_policy_pscore must be in the range of\",\n+ ),\n+ (\n+ \"4\", #\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ 
\"slate_id must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list).reshape((n_rounds, len_list)), #\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"slate_id must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list) - 1, #\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"slate_id elements must be non-negative integers\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list), #\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.repeat(np.arange(n_rounds), len_list), #\n+ np.ones(n_rounds * len_list),\n+ \"position must not be duplicated in each slate\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, pscore, position, evaluation_policy_pscore, description\",\n+ invalid_input_of_slate_estimators,\n+)\n+def test_slate_estimators_using_invalid_input_data(\n+ slate_id, reward, pscore, position, evaluation_policy_pscore, description\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = sips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = sips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = iips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = iips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = rips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = rips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+\n+\n+# --- valid (all slate estimators) ---\n+\n+valid_input_of_slate_estimators = [\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"each slate has data of 3 (len_list) positions\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list)[:-1],\n+ np.zeros(n_rounds * len_list, dtype=int)[:-1],\n+ np.ones(n_rounds * len_list)[:-1],\n+ np.tile(np.arange(len_list), n_rounds)[:-1],\n+ np.ones(n_rounds * len_list)[:-1],\n+ \"last slate has data of 2 (len_list - 1) positions\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, pscore, position, evaluation_policy_pscore, description\",\n+ valid_input_of_slate_estimators,\n+)\n+def test_slate_estimators_using_valid_input_data(\n+ slate_id, reward, pscore, position, evaluation_policy_pscore, description\n+) -> None:\n+ _ = sips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = sips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ 
evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = iips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = iips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = rips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = rips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+\n+\n+# --- invalid (sips) ---\n+invalid_input_of_sips = [\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ \"4\", #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones((n_rounds, len_list)), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list) + 1, #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list) - 1, #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list - 1), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"slate_id, position, reward, pscore, and evaluation_policy_pscore must be the same size\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.hstack([np.ones(n_rounds * len_list - 1), [0.2]]), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore must be unique in each slate\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, pscore, position, evaluation_policy_pscore, description\",\n+ invalid_input_of_sips,\n+)\n+def test_sips_using_invalid_input_data(\n+ slate_id, reward, pscore, position, evaluation_policy_pscore, description\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = sips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = sips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+\n+\n+# --- invalid (iips) ---\n+invalid_input_of_iips = [\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ \"4\", #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_item_position must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones((n_rounds, len_list)), #\n+ 
np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_item_position must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list) + 1, #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_item_position must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list) - 1, #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_item_position must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list - 1), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore must be the same size\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, pscore_item_position, position, evaluation_policy_pscore, description\",\n+ invalid_input_of_iips,\n+)\n+def test_iips_using_invalid_input_data(\n+ slate_id,\n+ reward,\n+ pscore_item_position,\n+ position,\n+ evaluation_policy_pscore,\n+ description,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = iips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = iips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+\n+\n+# --- invalid (rips) ---\n+invalid_input_of_rips = [\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ \"4\", #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_cascade must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones((n_rounds, len_list)), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_cascade must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list) + 1, #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_cascade must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list) - 1, #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_cascade must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list - 1), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore must be the same size\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.hstack([[0.2], np.ones(n_rounds * len_list - 1)]), #\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list),\n+ \"pscore_cascade must be non-increasing sequence in each slate\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, pscore_cascade, position, 
evaluation_policy_pscore, description\",\n+ invalid_input_of_rips,\n+)\n+def test_rips_using_invalid_input_data(\n+ slate_id, reward, pscore_cascade, position, evaluation_policy_pscore, description\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = rips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = rips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+\n+\n+# --- confidence intervals ---\n+# alpha, n_bootstrap_samples, random_state, description\n+invalid_input_of_estimate_intervals = [\n+ (0.05, 100, \"s\", \"random_state must be an integer\"),\n+ (0.05, -1, 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.05, \"s\", 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (1.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (\"0\", 1, 1, \"alpha must be a positive float (< 1)\"),\n+]\n+\n+valid_input_of_estimate_intervals = [\n+ (0.05, 100, 1, \"random_state is 1\"),\n+ (0.05, 1, 1, \"n_bootstrap_samples is 1\"),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, pscore, position, evaluation_policy_pscore, description_1\",\n+ valid_input_of_slate_estimators,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ invalid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\n+ slate_id,\n+ reward,\n+ pscore,\n+ position,\n+ evaluation_policy_pscore,\n+ description_1,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description_2,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = sips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = iips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = rips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"slate_id, reward, pscore, position, evaluation_policy_pscore, description_1\",\n+ valid_input_of_slate_estimators,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_valid_input_data(\n+ slate_id,\n+ reward,\n+ pscore,\n+ position,\n+ evaluation_policy_pscore,\n+ description_1,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description_2,\n+) -> None:\n+ _ = sips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = iips.estimate_interval(\n+ slate_id=slate_id,\n+ 
reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = rips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add slate ope tests |
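For reference, the call pattern these tests exercise, constructing an estimator with len_list and asking for a bootstrap confidence interval, looks roughly like the following at the version of obp these commits target; the array contents are placeholders mirroring the test fixtures, not meaningful data:

import numpy as np
from obp.ope import SlateStandardIPS

n_rounds, len_list = 5, 3
sips = SlateStandardIPS(len_list=len_list)
interval = sips.estimate_interval(
    slate_id=np.repeat(np.arange(n_rounds), len_list),
    reward=np.zeros(n_rounds * len_list, dtype=int),
    pscore=np.ones(n_rounds * len_list),
    position=np.tile(np.arange(len_list), n_rounds),
    evaluation_policy_pscore=np.ones(n_rounds * len_list),
    alpha=0.05,
    n_bootstrap_samples=100,
    random_state=12345,
)
print(interval)  # dict holding the estimated mean and upper/lower confidence bounds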
641,006 | 19.04.2021 04:05:41 | -32,400 | 4258c7c177de9186ed453d0a53c692072b9d5e26 | fix synthetic slate bug | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -591,7 +591,7 @@ def generate_symmetric_matrix(n_unique_action: int, random_state: int) -> np.nda\nsymmetric_matrix: array-like, shape (n_unique_action, n_unique_action)\n\"\"\"\nrandom_ = check_random_state(random_state)\n- base_matrix = random_.normal(size=(n_unique_action, n_unique_action))\n+ base_matrix = random_.normal(scale=5, size=(n_unique_action, n_unique_action))\nsymmetric_matrix = (\nnp.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())\n)\n@@ -698,7 +698,7 @@ def action_interaction_additive_reward_function(\ntmp_fixed_reward = expected_reward[\nnp.arange(context.shape[0]), action_2d[:, position_]\n]\n- for position2_ in np.arange(len_list):\n+ for position2_ in np.arange(len_list)[::-1]:\nif is_cascade:\nif position_ >= position2_:\nbreak\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix synthetic slate bug |
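The generate_symmetric_matrix helper touched by this fix builds a symmetric matrix by mirroring the lower triangle of a random draw and subtracting the diagonal once so it is not counted twice. A quick standalone check of that construction, with an arbitrary size and seed:

import numpy as np
from sklearn.utils import check_random_state

n_unique_action = 4
random_ = check_random_state(12345)
base_matrix = random_.normal(scale=5, size=(n_unique_action, n_unique_action))
# Lower triangle plus its transpose double-counts the diagonal, so subtract it once.
symmetric_matrix = (
    np.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())
)
assert np.allclose(symmetric_matrix, symmetric_matrix.T)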
641,006 | 19.04.2021 04:06:18 | -32,400 | 6c245cd57617741f681763e884aa4d4492a510fa | add slate ope performance test | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators_slate.py",
"new_path": "tests/ope/test_ipw_estimators_slate.py",
"diff": "@@ -2,6 +2,11 @@ import pytest\nimport numpy as np\nfrom obp.ope import SlateStandardIPS, SlateIndependentIPS, SlateRecursiveIPS\n+from obp.dataset import (\n+ logistic_reward_function,\n+ linear_behavior_policy_logit,\n+ SyntheticSlateBanditDataset,\n+)\n# setting\nlen_list = 3\n@@ -611,3 +616,195 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n+\n+\n+def test_slate_ope_performance_using_cascade_additive_log():\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 1000\n+ reward_structure = \"cascade_additive\"\n+ click_model = None\n+ behavior_policy_function = linear_behavior_policy_logit\n+ reward_function = logistic_reward_function\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=behavior_policy_function,\n+ base_reward_function=reward_function,\n+ )\n+ random_behavior_dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=None,\n+ base_reward_function=reward_function,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ slate_id = bandit_feedback[\"slate_id\"]\n+ reward = bandit_feedback[\"reward\"]\n+ pscore = bandit_feedback[\"pscore\"]\n+ pscore_item_position = bandit_feedback[\"pscore_item_position\"]\n+ pscore_cascade = bandit_feedback[\"pscore_cascade\"]\n+ position = bandit_feedback[\"position\"]\n+\n+ # obtain random behavior feedback\n+ random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_exact_uniform_pscore_item_position=True\n+ )\n+\n+ sips_estimated_policy_value = sips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ iips_estimated_policy_value = iips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore_item_position\"],\n+ )\n+ rips_estimated_policy_value = rips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n+ # compute statistics of ground truth policy value\n+ q_pi_e = (\n+ random_behavior_feedback[\"reward\"]\n+ .reshape((n_rounds, dataset.len_list))\n+ .mean(axis=1)\n+ )\n+ gt_mean = q_pi_e.mean()\n+ gt_std = q_pi_e.std(ddof=1)\n+ print(\"Cascade additive\")\n+ # check the performance of OPE\n+ ci_bound = gt_std * 3 / np.sqrt(q_pi_e.shape[0])\n+ print(f\"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}\")\n+ estimated_policy_value = {\n+ \"sips\": sips_estimated_policy_value,\n+ \"iips\": iips_estimated_policy_value,\n+ \"rips\": rips_estimated_policy_value,\n+ }\n+ for key in estimated_policy_value:\n+ print(\n+ f\"estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, \"\n+ )\n+ # 
test the performance of each estimator\n+ assert (\n+ np.abs(gt_mean - estimated_policy_value[key]) <= ci_bound\n+ ), f\"OPE of {key} did not work well (absolute error is greater than 3*sigma)\"\n+\n+\n+def test_slate_ope_performance_using_independent_log():\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 1000\n+ reward_structure = \"independent\"\n+ click_model = None\n+ behavior_policy_function = linear_behavior_policy_logit\n+ reward_function = logistic_reward_function\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=behavior_policy_function,\n+ base_reward_function=reward_function,\n+ )\n+ random_behavior_dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=None,\n+ base_reward_function=reward_function,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ slate_id = bandit_feedback[\"slate_id\"]\n+ reward = bandit_feedback[\"reward\"]\n+ pscore = bandit_feedback[\"pscore\"]\n+ pscore_item_position = bandit_feedback[\"pscore_item_position\"]\n+ pscore_cascade = bandit_feedback[\"pscore_cascade\"]\n+ position = bandit_feedback[\"position\"]\n+\n+ # obtain random behavior feedback\n+ random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_exact_uniform_pscore_item_position=True\n+ )\n+\n+ sips_estimated_policy_value = sips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ iips_estimated_policy_value = iips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore_item_position\"],\n+ )\n+ rips_estimated_policy_value = rips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n+ # compute statistics of ground truth policy value\n+ q_pi_e = (\n+ random_behavior_feedback[\"reward\"]\n+ .reshape((n_rounds, dataset.len_list))\n+ .mean(axis=1)\n+ )\n+ gt_mean = q_pi_e.mean()\n+ gt_std = q_pi_e.std(ddof=1)\n+ print(\"Independent\")\n+ # check the performance of OPE\n+ ci_bound = gt_std * 3 / np.sqrt(q_pi_e.shape[0])\n+ print(f\"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}\")\n+ estimated_policy_value = {\n+ \"sips\": sips_estimated_policy_value,\n+ \"iips\": iips_estimated_policy_value,\n+ \"rips\": rips_estimated_policy_value,\n+ }\n+ for key in estimated_policy_value:\n+ print(\n+ f\"estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, \"\n+ )\n+ # test the performance of each estimator\n+ assert (\n+ np.abs(gt_mean - estimated_policy_value[key]) <= ci_bound\n+ ), f\"OPE of {key} did not work well (absolute error is greater than 3*sigma)\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add slate ope performance test |
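The performance test in the record above accepts an estimator when its estimate lies within three standard errors of a Monte-Carlo ground truth computed from on-policy rewards of the evaluation policy. A minimal sketch of that acceptance check, with stand-in Bernoulli rewards and a made-up estimate of 0.31 in place of real bandit feedback and a real OPE output (both are assumptions, not values from the repository):

```python
import numpy as np

# stand-in per-round rewards of the evaluation policy run on-policy (illustrative, not repo data)
rng = np.random.RandomState(12345)
q_pi_e = rng.binomial(n=1, p=0.3, size=1000).astype(float)

gt_mean = q_pi_e.mean()                                       # Monte-Carlo ground-truth value
ci_bound = 3 * q_pi_e.std(ddof=1) / np.sqrt(q_pi_e.shape[0])  # 3-sigma bound on the MC error

estimated_policy_value = 0.31                                 # hypothetical OPE estimate
print(f"gt_mean={gt_mean:.4f}, bound={ci_bound:.4f}, "
      f"passes={np.abs(gt_mean - estimated_policy_value) <= ci_bound}")
```

With 1000 binary rewards the bound comes out around 0.04, so an estimator has to track the true value fairly closely for the assertion in the test to hold.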
641,006 | 25.04.2021 18:29:01 | -32,400 | 6d13599b83c7d643b2586f31736edf1ee486bfc0 | fix column names related to evaluation_policy_pscore; add efficient bootstrap method | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -11,6 +11,7 @@ from obp.ope.estimators_slate import SlateStandardIPS\nfrom obp.ope.estimators_slate import SlateIndependentIPS\nfrom obp.ope.estimators_slate import SlateRecursiveIPS\nfrom obp.ope.meta import OffPolicyEvaluation\n+from obp.ope.meta_slate import SlateOffPolicyEvaluation\nfrom obp.ope.regression_model import RegressionModel\n__all__ = [\n@@ -24,6 +25,7 @@ __all__ = [\n\"SwitchDoublyRobust\",\n\"DoublyRobustWithShrinkage\",\n\"OffPolicyEvaluation\",\n+ \"SlateOffPolicyEvaluation\",\n\"RegressionModel\",\n\"SlateStandardIPS\",\n\"SlateIndependentIPS\",\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -7,10 +7,9 @@ from dataclasses import dataclass\nfrom typing import Dict, Optional\nimport numpy as np\n-from sklearn.utils import check_random_state\nfrom ..utils import (\n- check_confidence_interval_arguments,\n+ estimate_confidence_interval_by_bootstrap,\ncheck_sips_ope_inputs,\ncheck_iips_ope_inputs,\ncheck_rips_ope_inputs,\n@@ -37,7 +36,6 @@ class BaseSlateOffPolicyEstimator(metaclass=ABCMeta):\nraise NotImplementedError\n-# TODO :comment\n@dataclass\nclass BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\n\"\"\"Base Class of Slate Inverse Probability Weighting.\n@@ -84,35 +82,6 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nestimated_rewards = reward * iw\nreturn estimated_rewards\n- @staticmethod\n- def _extract_reward_by_bootstrap(\n- slate_id: np.ndarray, estimated_rewards: np.ndarray, sampled_slate: np.ndarray\n- ) -> np.ndarray:\n- \"\"\"Extract reward based on sampled slate.\n-\n- Parameters\n- ----------\n- slate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n-\n- estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW for each round and slot.\n-\n- sampled_slate: array-like, shape (n_rounds,)\n- Slate id sampled by bootstrap.\n-\n- Returns\n- ----------\n- sampled_estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Estimated rewards sampled by bootstrap\n-\n- \"\"\"\n- sampled_estimated_rewards = list()\n- for slate in sampled_slate:\n- sampled_estimated_rewards.extend(estimated_rewards[slate_id == slate])\n- sampled_estimated_rewards = np.array(sampled_estimated_rewards)\n- return sampled_estimated_rewards\n-\ndef _estimate_slate_confidence_interval_by_bootstrap(\nself,\nslate_id: np.ndarray,\n@@ -146,31 +115,19 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n- check_confidence_interval_arguments(\n+ unique_slate = np.unique(slate_id)\n+ # sum estimated_rewards in each slate\n+ estimated_round_rewards = list()\n+ for slate in unique_slate:\n+ estimated_round_rewards.append(estimated_rewards[slate_id == slate].sum())\n+ estimated_round_rewards = np.array(estimated_round_rewards)\n+ return estimate_confidence_interval_by_bootstrap(\n+ samples=estimated_round_rewards,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n- unique_slate = np.unique(slate_id)\n- boot_samples = list()\n- random_ = check_random_state(random_state)\n- for _ in np.arange(n_bootstrap_samples):\n- sampled_slate = random_.choice(unique_slate, size=unique_slate.shape[0])\n- sampled_estimated_rewards = self._extract_reward_by_bootstrap(\n- slate_id=slate_id,\n- estimated_rewards=estimated_rewards,\n- sampled_slate=sampled_slate,\n- )\n- boot_samples.append(sampled_estimated_rewards.sum() / unique_slate.shape[0])\n- lower_bound = np.percentile(boot_samples, 100 * (alpha / 2))\n- upper_bound = np.percentile(boot_samples, 100 * (1.0 - alpha / 2))\n- return {\n- \"mean\": np.mean(boot_samples),\n- f\"{100 * (1. - alpha)}% CI (lower)\": lower_bound,\n- f\"{100 * (1. 
- alpha)}% CI (upper)\": upper_bound,\n- }\n-\n@dataclass\nclass SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n@@ -236,12 +193,15 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\npscore=pscore,\nevaluation_policy_pscore=evaluation_policy_pscore,\n)\n- return self._estimate_round_rewards(\n+ return (\n+ self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore,\nevaluation_policy_pscore=evaluation_policy_pscore,\n- ).mean()\n+ ).sum()\n+ / np.unique(slate_id).shape[0]\n+ )\ndef estimate_interval(\nself,\n@@ -340,7 +300,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nreward: np.ndarray,\nposition: np.ndarray,\npscore_item_position: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore_item_position: np.ndarray,\n**kwargs,\n) -> float:\n\"\"\"Estimate policy value of an evaluation policy.\n@@ -359,7 +319,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\npscore_item_position: array-like, shape (<= n_rounds * len_list,)\nMarginal action choice probabilities of the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_{t, k}|x_t)`.\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ evaluation_policy_pscore_item_position: array-like, shape (<= n_rounds * len_list,)\nMarginal action choice probabilities of the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_{t, k}|x_t)`.\nReturns\n@@ -373,14 +333,17 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nreward=reward,\nposition=position,\npscore_item_position=pscore_item_position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n)\n- return self._estimate_round_rewards(\n+ return (\n+ self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_item_position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n- ).mean()\n+ evaluation_policy_pscore=evaluation_policy_pscore_item_position,\n+ ).sum()\n+ / np.unique(slate_id).shape[0]\n+ )\ndef estimate_interval(\nself,\n@@ -388,7 +351,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nreward: np.ndarray,\nposition: np.ndarray,\npscore_item_position: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore_item_position: np.ndarray,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n@@ -410,7 +373,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\npscore_item_position: array-like, shape (<= n_rounds * len_list,)\nMarginal action choice probabilities of the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_{t, k}|x_t)`.\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ evaluation_policy_pscore_item_position: array-like, shape (<= n_rounds * len_list,)\nMarginal action choice probabilities of the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_{t, k}|x_t)`.\nalpha: float, default=0.05\n@@ -433,13 +396,13 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nreward=reward,\nposition=position,\npscore_item_position=pscore_item_position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n)\nestimated_rewards = 
self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_item_position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore=evaluation_policy_pscore_item_position,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\nslate_id=slate_id,\n@@ -478,7 +441,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nreward: np.ndarray,\nposition: np.ndarray,\npscore_cascade: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore_cascade: np.ndarray,\n**kwargs,\n) -> float:\n\"\"\"Estimate policy value of an evaluation policy.\n@@ -497,7 +460,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\npscore_cascade: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities above the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ evaluation_policy_pscore_cascade: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities above the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\nReturns\n@@ -512,14 +475,17 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nreward=reward,\nposition=position,\npscore_cascade=pscore_cascade,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n)\n- return self._estimate_round_rewards(\n+ return (\n+ self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_cascade,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n- ).mean()\n+ evaluation_policy_pscore=evaluation_policy_pscore_cascade,\n+ ).sum()\n+ / np.unique(slate_id).shape[0]\n+ )\ndef estimate_interval(\nself,\n@@ -527,7 +493,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nreward: np.ndarray,\nposition: np.ndarray,\npscore_cascade: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore_cascade: np.ndarray,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n@@ -549,7 +515,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\npscore_cascade: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities above the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ evaluation_policy_pscore_cascade: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities above the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\nalpha: float, default=0.05\n@@ -572,13 +538,13 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nreward=reward,\nposition=position,\npscore_cascade=pscore_cascade,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n)\nestimated_rewards = self._estimate_round_rewards(\nreward=reward,\nposition=position,\nbehavior_policy_pscore=pscore_cascade,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore=evaluation_policy_pscore_cascade,\n)\nreturn self._estimate_slate_confidence_interval_by_bootstrap(\nslate_id=slate_id,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -429,6 +429,12 @@ def check_sips_ope_inputs(\n)\nif (distinct_count_pscore_in_slate != 1).sum() > 0:\nraise ValueError(\"pscore must be unique in each slate\")\n+ # check pscore uniqueness of evaluation policy\n+ distinct_count_evaluation_policy_pscore_in_slate = bandit_feedback_df.groupby(\n+ \"slate_id\"\n+ ).apply(lambda x: x[\"evaluation_policy_pscore\"].unique().shape[0])\n+ if (distinct_count_evaluation_policy_pscore_in_slate != 1).sum() > 0:\n+ raise ValueError(\"evaluation_policy_pscore must be unique in each slate\")\ndef check_iips_ope_inputs(\n@@ -436,7 +442,7 @@ def check_iips_ope_inputs(\nreward: np.ndarray,\nposition: np.ndarray,\npscore_item_position: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore_item_position: np.ndarray,\n) -> Optional[ValueError]:\n\"\"\"Check inputs for sips ope.\n@@ -454,8 +460,8 @@ def check_iips_ope_inputs(\npscore_item_position: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ evaluation_policy_pscore_item_position: array-like, shape (<= n_rounds * len_list,)\n+ Marginal action choice probabilities of the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_{t, k}|x_t)`.\n\"\"\"\n# position\n@@ -481,12 +487,16 @@ def check_iips_ope_inputs(\nraise ValueError(\"pscore_item_position must be in the range of (0, 1]\")\n# evaluation_policy_pscore\n- if not isinstance(evaluation_policy_pscore, np.ndarray):\n- raise ValueError(\"evaluation_policy_pscore must be ndarray\")\n- if evaluation_policy_pscore.ndim != 1:\n- raise ValueError(\"evaluation_policy_pscore must be 1-dimensional\")\n- if np.any(evaluation_policy_pscore <= 0) or np.any(evaluation_policy_pscore > 1):\n- raise ValueError(\"evaluation_policy_pscore must be in the range of (0, 1]\")\n+ if not isinstance(evaluation_policy_pscore_item_position, np.ndarray):\n+ raise ValueError(\"evaluation_policy_pscore_item_position must be ndarray\")\n+ if evaluation_policy_pscore_item_position.ndim != 1:\n+ raise ValueError(\"evaluation_policy_pscore_item_position must be 1-dimensional\")\n+ if np.any(evaluation_policy_pscore_item_position <= 0) or np.any(\n+ evaluation_policy_pscore_item_position > 1\n+ ):\n+ raise ValueError(\n+ \"evaluation_policy_pscore_item_position must be in the range of (0, 1]\"\n+ )\n# slate id\nif not isinstance(slate_id, np.ndarray):\n@@ -500,18 +510,15 @@ def check_iips_ope_inputs(\n== position.shape[0]\n== reward.shape[0]\n== pscore_item_position.shape[0]\n- == evaluation_policy_pscore.shape[0]\n+ == evaluation_policy_pscore_item_position.shape[0]\n):\nraise ValueError(\n- \"slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore must be the same size.\"\n+ \"slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore_item_position must be the same size.\"\n)\nbandit_feedback_df = pd.DataFrame()\nbandit_feedback_df[\"slate_id\"] = slate_id\n- bandit_feedback_df[\"reward\"] = reward\nbandit_feedback_df[\"position\"] = position\n- bandit_feedback_df[\"pscore_item_position\"] = pscore_item_position\n- bandit_feedback_df[\"evaluation_policy_pscore\"] = evaluation_policy_pscore\n# check uniqueness\nif bandit_feedback_df.duplicated([\"slate_id\", \"position\"]).sum() > 0:\nraise 
ValueError(\"position must not be duplicated in each slate\")\n@@ -522,7 +529,7 @@ def check_rips_ope_inputs(\nreward: np.ndarray,\nposition: np.ndarray,\npscore_cascade: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore_cascade: np.ndarray,\n) -> Optional[ValueError]:\n\"\"\"Check inputs for sips ope.\n@@ -540,8 +547,8 @@ def check_rips_ope_inputs(\npscore_cascade: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ evaluation_policy_pscore_cascade: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities above the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n\"\"\"\n# position\n@@ -567,12 +574,16 @@ def check_rips_ope_inputs(\nraise ValueError(\"pscore_cascade must be in the range of (0, 1]\")\n# evaluation_policy_pscore\n- if not isinstance(evaluation_policy_pscore, np.ndarray):\n- raise ValueError(\"evaluation_policy_pscore must be ndarray\")\n- if evaluation_policy_pscore.ndim != 1:\n- raise ValueError(\"evaluation_policy_pscore must be 1-dimensional\")\n- if np.any(evaluation_policy_pscore <= 0) or np.any(evaluation_policy_pscore > 1):\n- raise ValueError(\"evaluation_policy_pscore must be in the range of (0, 1]\")\n+ if not isinstance(evaluation_policy_pscore_cascade, np.ndarray):\n+ raise ValueError(\"evaluation_policy_pscore_cascade must be ndarray\")\n+ if evaluation_policy_pscore_cascade.ndim != 1:\n+ raise ValueError(\"evaluation_policy_pscore_cascade must be 1-dimensional\")\n+ if np.any(evaluation_policy_pscore_cascade <= 0) or np.any(\n+ evaluation_policy_pscore_cascade > 1\n+ ):\n+ raise ValueError(\n+ \"evaluation_policy_pscore_cascade must be in the range of (0, 1]\"\n+ )\n# slate id\nif not isinstance(slate_id, np.ndarray):\n@@ -586,10 +597,10 @@ def check_rips_ope_inputs(\n== position.shape[0]\n== reward.shape[0]\n== pscore_cascade.shape[0]\n- == evaluation_policy_pscore.shape[0]\n+ == evaluation_policy_pscore_cascade.shape[0]\n):\nraise ValueError(\n- \"slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore must be the same size.\"\n+ \"slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore_cascade must be the same size.\"\n)\nbandit_feedback_df = pd.DataFrame()\n@@ -597,7 +608,9 @@ def check_rips_ope_inputs(\nbandit_feedback_df[\"reward\"] = reward\nbandit_feedback_df[\"position\"] = position\nbandit_feedback_df[\"pscore_cascade\"] = pscore_cascade\n- bandit_feedback_df[\"evaluation_policy_pscore\"] = evaluation_policy_pscore\n+ bandit_feedback_df[\n+ \"evaluation_policy_pscore_cascade\"\n+ ] = evaluation_policy_pscore_cascade\n# sort dataframe\nbandit_feedback_df = (\nbandit_feedback_df.sort_values([\"slate_id\", \"position\"])\n@@ -618,6 +631,20 @@ def check_rips_ope_inputs(\nprevious_minimum_pscore_cascade < bandit_feedback_df[\"pscore_cascade\"]\n).sum() > 0:\nraise ValueError(\"pscore_cascade must be non-increasing sequence in each slate\")\n+ # check pscore_cascade structure of evaluation policy\n+ previous_minimum_evaluation_policy_pscore_cascade = (\n+ bandit_feedback_df.groupby(\"slate_id\")[\"evaluation_policy_pscore_cascade\"]\n+ .expanding()\n+ .min()\n+ .values\n+ )\n+ if (\n+ 
previous_minimum_evaluation_policy_pscore_cascade\n+ < bandit_feedback_df[\"evaluation_policy_pscore_cascade\"]\n+ ).sum() > 0:\n+ raise ValueError(\n+ \"evaluation_policy_pscore_cascade must be non-increasing sequence in each slate\"\n+ )\ndef check_ope_inputs_tensor(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -11,6 +11,7 @@ from obp.dataset import (\nSyntheticBanditDataset,\nlogistic_reward_function,\nlinear_behavior_policy,\n+ SyntheticSlateBanditDataset,\n)\nfrom obp.utils import sigmoid\n@@ -33,6 +34,30 @@ def synthetic_bandit_feedback() -> BanditFeedback:\nreturn bandit_feedback\n+# generate synthetic slate dataset using SyntheticBanditDataset\[email protected](scope=\"session\")\n+def synthetic_slate_bandit_feedback() -> BanditFeedback:\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 100\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_exact_uniform_pscore_item_position=True\n+ )\n+ return bandit_feedback\n+\n+\n# make the expected reward of synthetic bandit feedback close to that of the Open Bandit Dataset\[email protected](scope=\"session\")\ndef fixed_synthetic_bandit_feedback(synthetic_bandit_feedback) -> BanditFeedback:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators_slate.py",
"new_path": "tests/ope/test_ipw_estimators_slate.py",
"diff": "@@ -16,22 +16,6 @@ rips = SlateRecursiveIPS(len_list=len_list)\nn_rounds = 5\n-# --- _extract_reward_by_bootstrap ---\n-def test_extract_reward_by_bootstrap() -> None:\n- slate_id = np.repeat(np.arange(5), 3)\n- estimated_rewards = np.random.normal(size=n_rounds * len_list)\n- sampled_slate = np.array([0, 3, 0])\n- sampled_estimated_rewards = sips._extract_reward_by_bootstrap(\n- slate_id=slate_id,\n- estimated_rewards=estimated_rewards,\n- sampled_slate=sampled_slate,\n- )\n- correct_sampled_estimated_rewards = np.hstack(\n- [estimated_rewards[0:3], estimated_rewards[9:12], estimated_rewards[0:3]]\n- )\n- assert np.allclose(sampled_estimated_rewards, correct_sampled_estimated_rewards)\n-\n-\n# --- invalid (all slate estimators) ---\n# slate_id, reward, pscore, position, evaluation_policy_pscore, description\n@@ -76,38 +60,6 @@ invalid_input_of_slate_estimators = [\nnp.ones(n_rounds * len_list),\n\"reward must be 1-dimensional\",\n),\n- (\n- np.repeat(np.arange(n_rounds), len_list),\n- np.zeros(n_rounds * len_list, dtype=int),\n- np.ones(n_rounds * len_list),\n- np.tile(np.arange(len_list), n_rounds),\n- \"4\", #\n- \"evaluation_policy_pscore must be ndarray\",\n- ),\n- (\n- np.repeat(np.arange(n_rounds), len_list),\n- np.zeros(n_rounds * len_list, dtype=int),\n- np.ones(n_rounds * len_list),\n- np.tile(np.arange(len_list), n_rounds),\n- np.ones((n_rounds, len_list)), #\n- \"evaluation_policy_pscore must be 1-dimensional\",\n- ),\n- (\n- np.repeat(np.arange(n_rounds), len_list),\n- np.zeros(n_rounds * len_list, dtype=int),\n- np.ones(n_rounds * len_list),\n- np.tile(np.arange(len_list), n_rounds),\n- np.ones(n_rounds * len_list) + 1, #\n- \"evaluation_policy_pscore must be in the range of\",\n- ),\n- (\n- np.repeat(np.arange(n_rounds), len_list),\n- np.zeros(n_rounds * len_list, dtype=int),\n- np.ones(n_rounds * len_list),\n- np.tile(np.arange(len_list), n_rounds),\n- np.ones(n_rounds * len_list) - 1, #\n- \"evaluation_policy_pscore must be in the range of\",\n- ),\n(\n\"4\", #\nnp.zeros(n_rounds * len_list, dtype=int),\n@@ -170,28 +122,28 @@ def test_slate_estimators_using_invalid_input_data(\nreward=reward,\npscore_item_position=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n)\n_ = iips.estimate_interval(\nslate_id=slate_id,\nreward=reward,\npscore_item_position=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n)\n_ = rips.estimate_policy_value(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n)\n_ = rips.estimate_interval(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n)\n@@ -243,28 +195,28 @@ def test_slate_estimators_using_valid_input_data(\nreward=reward,\npscore_item_position=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n)\n_ = iips.estimate_interval(\nslate_id=slate_id,\nreward=reward,\npscore_item_position=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n)\n_ = 
rips.estimate_policy_value(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n)\n_ = rips.estimate_interval(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n)\n@@ -318,6 +270,46 @@ invalid_input_of_sips = [\nnp.ones(n_rounds * len_list),\n\"pscore must be unique in each slate\",\n),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ \"4\", #\n+ \"evaluation_policy_pscore must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones((n_rounds, len_list)), #\n+ \"evaluation_policy_pscore must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) + 1, #\n+ \"evaluation_policy_pscore must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) - 1, #\n+ \"evaluation_policy_pscore must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.hstack([np.ones(n_rounds * len_list - 1), [0.2]]), #\n+ \"evaluation_policy_pscore must be unique in each slate\",\n+ ),\n]\n@@ -385,13 +377,45 @@ invalid_input_of_iips = [\nnp.ones(n_rounds * len_list - 1), #\nnp.tile(np.arange(len_list), n_rounds),\nnp.ones(n_rounds * len_list),\n- \"slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore must be the same size\",\n+ \"slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore_item_position must be the same size\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ \"4\", #\n+ \"evaluation_policy_pscore_item_position must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones((n_rounds, len_list)), #\n+ \"evaluation_policy_pscore_item_position must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) + 1, #\n+ \"evaluation_policy_pscore_item_position must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) - 1, #\n+ \"evaluation_policy_pscore_item_position must be in the range of\",\n),\n]\[email protected](\n- \"slate_id, reward, pscore_item_position, position, evaluation_policy_pscore, 
description\",\n+ \"slate_id, reward, pscore_item_position, position, evaluation_policy_pscore_item_position, description\",\ninvalid_input_of_iips,\n)\ndef test_iips_using_invalid_input_data(\n@@ -399,7 +423,7 @@ def test_iips_using_invalid_input_data(\nreward,\npscore_item_position,\nposition,\n- evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position,\ndescription,\n) -> None:\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n@@ -408,14 +432,14 @@ def test_iips_using_invalid_input_data(\nreward=reward,\npscore_item_position=pscore_item_position,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n)\n_ = iips.estimate_interval(\nslate_id=slate_id,\nreward=reward,\npscore_item_position=pscore_item_position,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n)\n@@ -459,7 +483,7 @@ invalid_input_of_rips = [\nnp.ones(n_rounds * len_list - 1), #\nnp.tile(np.arange(len_list), n_rounds),\nnp.ones(n_rounds * len_list),\n- \"slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore must be the same size\",\n+ \"slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore_cascade must be the same size\",\n),\n(\nnp.repeat(np.arange(n_rounds), len_list),\n@@ -469,15 +493,60 @@ invalid_input_of_rips = [\nnp.ones(n_rounds * len_list),\n\"pscore_cascade must be non-increasing sequence in each slate\",\n),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ \"4\", #\n+ \"evaluation_policy_pscore_cascade must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones((n_rounds, len_list)), #\n+ \"evaluation_policy_pscore_cascade must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) + 1, #\n+ \"evaluation_policy_pscore_cascade must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.ones(n_rounds * len_list) - 1, #\n+ \"evaluation_policy_pscore_cascade must be in the range of\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ np.ones(n_rounds * len_list),\n+ np.tile(np.arange(len_list), n_rounds),\n+ np.hstack([[0.2], np.ones(n_rounds * len_list - 1)]), #\n+ \"evaluation_policy_pscore_cascade must be non-increasing sequence in each slate\",\n+ ),\n]\[email protected](\n- \"slate_id, reward, pscore_cascade, position, evaluation_policy_pscore, description\",\n+ \"slate_id, reward, pscore_cascade, position, evaluation_policy_pscore_cascade, description\",\ninvalid_input_of_rips,\n)\ndef test_rips_using_invalid_input_data(\n- slate_id, reward, pscore_cascade, position, evaluation_policy_pscore, description\n+ slate_id,\n+ reward,\n+ pscore_cascade,\n+ position,\n+ evaluation_policy_pscore_cascade,\n+ description,\n) -> None:\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n_ = 
rips.estimate_policy_value(\n@@ -485,14 +554,14 @@ def test_rips_using_invalid_input_data(\nreward=reward,\npscore_cascade=pscore_cascade,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n)\n_ = rips.estimate_interval(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore_cascade,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n)\n@@ -549,7 +618,7 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\nreward=reward,\npscore_item_position=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -559,7 +628,7 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\nreward=reward,\npscore_cascade=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -601,7 +670,7 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nreward=reward,\npscore_item_position=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -611,7 +680,7 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nreward=reward,\npscore_cascade=pscore,\nposition=position,\n- evaluation_policy_pscore=evaluation_policy_pscore,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -678,20 +747,22 @@ def test_slate_ope_performance_using_cascade_additive_log():\nreward=reward,\npscore_item_position=pscore_item_position,\nposition=position,\n- evaluation_policy_pscore=random_behavior_feedback[\"pscore_item_position\"],\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n)\nrips_estimated_policy_value = rips.estimate_policy_value(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore_cascade,\nposition=position,\n- evaluation_policy_pscore=random_behavior_feedback[\"pscore_cascade\"],\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n)\n# compute statistics of ground truth policy value\nq_pi_e = (\nrandom_behavior_feedback[\"reward\"]\n.reshape((n_rounds, dataset.len_list))\n- .mean(axis=1)\n+ .sum(axis=1)\n)\ngt_mean = q_pi_e.mean()\ngt_std = q_pi_e.std(ddof=1)\n@@ -774,20 +845,22 @@ def test_slate_ope_performance_using_independent_log():\nreward=reward,\npscore_item_position=pscore_item_position,\nposition=position,\n- evaluation_policy_pscore=random_behavior_feedback[\"pscore_item_position\"],\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n)\nrips_estimated_policy_value = rips.estimate_policy_value(\nslate_id=slate_id,\nreward=reward,\npscore_cascade=pscore_cascade,\nposition=position,\n- evaluation_policy_pscore=random_behavior_feedback[\"pscore_cascade\"],\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n)\n# compute statistics of ground truth policy 
value\nq_pi_e = (\nrandom_behavior_feedback[\"reward\"]\n.reshape((n_rounds, dataset.len_list))\n- .mean(axis=1)\n+ .sum(axis=1)\n)\ngt_mean = q_pi_e.mean()\ngt_std = q_pi_e.std(ddof=1)\n@@ -808,3 +881,101 @@ def test_slate_ope_performance_using_independent_log():\nassert (\nnp.abs(gt_mean - estimated_policy_value[key]) <= ci_bound\n), f\"OPE of {key} did not work well (absolute error is greater than 3*sigma)\"\n+\n+\n+def test_slate_ope_performance_using_standard_additive_log():\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 1000\n+ reward_structure = \"standard_additive\"\n+ click_model = None\n+ behavior_policy_function = linear_behavior_policy_logit\n+ reward_function = logistic_reward_function\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=behavior_policy_function,\n+ base_reward_function=reward_function,\n+ )\n+ random_behavior_dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=None,\n+ base_reward_function=reward_function,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ slate_id = bandit_feedback[\"slate_id\"]\n+ reward = bandit_feedback[\"reward\"]\n+ pscore = bandit_feedback[\"pscore\"]\n+ pscore_item_position = bandit_feedback[\"pscore_item_position\"]\n+ pscore_cascade = bandit_feedback[\"pscore_cascade\"]\n+ position = bandit_feedback[\"position\"]\n+\n+ # obtain random behavior feedback\n+ random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_exact_uniform_pscore_item_position=True\n+ )\n+\n+ sips_estimated_policy_value = sips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ iips_estimated_policy_value = iips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n+ )\n+ rips_estimated_policy_value = rips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n+ # compute statistics of ground truth policy value\n+ q_pi_e = (\n+ random_behavior_feedback[\"reward\"]\n+ .reshape((n_rounds, dataset.len_list))\n+ .sum(axis=1)\n+ )\n+ gt_mean = q_pi_e.mean()\n+ gt_std = q_pi_e.std(ddof=1)\n+ print(\"Standard additive\")\n+ # check the performance of OPE\n+ ci_bound = gt_std * 3 / np.sqrt(q_pi_e.shape[0])\n+ print(f\"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}\")\n+ estimated_policy_value = {\n+ \"sips\": sips_estimated_policy_value,\n+ \"iips\": iips_estimated_policy_value,\n+ \"rips\": rips_estimated_policy_value,\n+ }\n+ for key in estimated_policy_value:\n+ print(\n+ f\"estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, \"\n+ )\n+ # test the 
performance of each estimator\n+ assert (\n+ np.abs(gt_mean - estimated_policy_value[key]) <= ci_bound\n+ ), f\"OPE of {key} did not work well (absolute error is greater than 3*sigma)\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix column names related to evaluation_policy_pscore; add efficient bootstrap method |
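The "efficient bootstrap method" in the record above first sums the position-level weighted rewards within each slate and then bootstraps the resulting per-slate sums, instead of resampling slate ids and re-extracting their rewards on every draw. A rough sketch of that shape, assuming obp's `estimate_confidence_interval_by_bootstrap` amounts to a standard percentile bootstrap of the sample mean; the function name `per_slate_bootstrap_ci` and the toy feedback below are illustrative, not library code:

```python
import numpy as np

def per_slate_bootstrap_ci(slate_id, estimated_rewards, alpha=0.05,
                           n_bootstrap_samples=1000, random_state=None):
    random_ = np.random.RandomState(random_state)
    # sum the position-level estimated rewards within each slate first ...
    unique_slate = np.unique(slate_id)
    per_slate_rewards = np.array(
        [estimated_rewards[slate_id == s].sum() for s in unique_slate]
    )
    # ... then bootstrap the per-slate sums directly (resample with replacement, take means)
    boot_means = np.array([
        random_.choice(per_slate_rewards, size=per_slate_rewards.shape[0]).mean()
        for _ in np.arange(n_bootstrap_samples)
    ])
    return {
        "mean": boot_means.mean(),
        f"{100 * (1. - alpha)}% CI (lower)": np.percentile(boot_means, 100 * (alpha / 2)),
        f"{100 * (1. - alpha)}% CI (upper)": np.percentile(boot_means, 100 * (1. - alpha / 2)),
    }

# toy usage: 4 slates x 3 positions
slate_id = np.repeat(np.arange(4), 3)
estimated_rewards = np.array([1., 0., 0., 0., 1., 1., 0., 0., 0., 1., 0., 1.])
print(per_slate_bootstrap_ci(slate_id, estimated_rewards, random_state=12345))
```

Resampling an array of one scalar per slate on each bootstrap draw is much cheaper than filtering the position-level arrays by slate id inside the loop, which appears to be the point of the change.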
641,005 | 01.05.2021 11:21:35 | -32,400 | 9e142a1c8bd47c4aa138c0e81e361cb1764ed2fd | Refactoring: add subclasses for linear and logistic policies | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/base.py",
"new_path": "obp/policy/base.py",
"diff": "@@ -111,12 +111,6 @@ class BaseContextualPolicy(metaclass=ABCMeta):\nbatch_size: int, default=1\nNumber of samples used in a batch parameter update.\n- alpha_: float, default=1.\n- Prior parameter for the online logistic regression.\n-\n- lambda_: float, default=1.\n- Regularization hyperparameter for the online logistic regression.\n-\nrandom_state: int, default=None\nControls the random seed in sampling actions.\n@@ -126,8 +120,6 @@ class BaseContextualPolicy(metaclass=ABCMeta):\nn_actions: int\nlen_list: int = 1\nbatch_size: int = 1\n- alpha_: float = 1.0\n- lambda_: float = 1.0\nrandom_state: Optional[int] = None\ndef __post_init__(self) -> None:\n@@ -155,20 +147,8 @@ class BaseContextualPolicy(metaclass=ABCMeta):\nf\"n_actions >= len_list should hold, but n_actions is {self.n_actions} and len_list is {self.len_list}\"\n)\n- if not isinstance(self.alpha_, float) or self.alpha_ <= 0.0:\n- raise ValueError(\n- f\"alpha_ should be a positive float, but {self.alpha_} is given\"\n- )\n-\n- if not isinstance(self.lambda_, float) or self.lambda_ <= 0.0:\n- raise ValueError(\n- f\"lambda_ should be a positive float, but {self.lambda_} is given\"\n- )\n-\nself.n_trial = 0\nself.random_ = check_random_state(self.random_state)\n- self.alpha_list = self.alpha_ * np.ones(self.n_actions)\n- self.lambda_list = self.lambda_ * np.ones(self.n_actions)\nself.action_counts = np.zeros(self.n_actions, dtype=int)\nself.reward_lists = [[] for _ in np.arange(self.n_actions)]\nself.context_lists = [[] for _ in np.arange(self.n_actions)]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/linear.py",
"new_path": "obp/policy/linear.py",
"diff": "@@ -10,7 +10,81 @@ from .base import BaseContextualPolicy\n@dataclass\n-class LinEpsilonGreedy(BaseContextualPolicy):\n+class BaseLinPolicy(BaseContextualPolicy):\n+ \"\"\"Linear Epsilon Greedy.\n+\n+ Parameters\n+ ------------\n+ dim: int\n+ Number of dimensions of context vectors.\n+\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default=1\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ batch_size: int, default=1\n+ Number of samples used in a batch parameter update.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling actions.\n+\n+ epsilon: float, default=0.\n+ Exploration hyperparameter that must take value in the range of [0., 1.].\n+\n+ \"\"\"\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize class.\"\"\"\n+ super().__post_init__()\n+ self.theta_hat = np.zeros((self.dim, self.n_actions))\n+ self.A_inv = np.concatenate(\n+ [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n+ ).reshape(self.n_actions, self.dim, self.dim)\n+ self.b = np.zeros((self.dim, self.n_actions))\n+\n+ self.A_inv_temp = np.concatenate(\n+ [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n+ ).reshape(self.n_actions, self.dim, self.dim)\n+ self.b_temp = np.zeros((self.dim, self.n_actions))\n+\n+ def update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n+ \"\"\"Update policy parameters.\n+\n+ Parameters\n+ ------------\n+ action: int\n+ Selected action by the policy.\n+\n+ reward: float\n+ Observed reward for the chosen action and position.\n+\n+ context: array-like, shape (1, dim_context)\n+ Observed context vector.\n+\n+ \"\"\"\n+ self.n_trial += 1\n+ self.action_counts[action] += 1\n+ # update the inverse matrix by the Woodbury formula\n+ self.A_inv_temp[action] -= (\n+ self.A_inv_temp[action]\n+ @ context.T\n+ @ context\n+ @ self.A_inv_temp[action]\n+ / (1 + context @ self.A_inv_temp[action] @ context.T)[0][0]\n+ )\n+ self.b_temp[:, action] += reward * context.flatten()\n+ if self.n_trial % self.batch_size == 0:\n+ self.A_inv, self.b = (\n+ np.copy(self.A_inv_temp),\n+ np.copy(self.b_temp),\n+ )\n+\n+\n+@dataclass\n+class LinEpsilonGreedy(BaseLinPolicy):\n\"\"\"Linear Epsilon Greedy.\nParameters\n@@ -56,16 +130,6 @@ class LinEpsilonGreedy(BaseContextualPolicy):\nself.policy_name = f\"linear_epsilon_greedy_{self.epsilon}\"\nsuper().__post_init__()\n- self.theta_hat = np.zeros((self.dim, self.n_actions))\n- self.A_inv = np.concatenate(\n- [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n- ).reshape(self.n_actions, self.dim, self.dim)\n- self.b = np.zeros((self.dim, self.n_actions))\n-\n- self.A_inv_temp = np.concatenate(\n- [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n- ).reshape(self.n_actions, self.dim, self.dim)\n- self.b_temp = np.zeros((self.dim, self.n_actions))\ndef select_action(self, context: np.ndarray) -> np.ndarray:\n\"\"\"Select action for new data.\n@@ -101,41 +165,9 @@ class LinEpsilonGreedy(BaseContextualPolicy):\nself.n_actions, size=self.len_list, replace=False\n)\n- def update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n- \"\"\"Update policy parameters.\n-\n- Parameters\n- ------------\n- action: int\n- Selected action by the policy.\n-\n- reward: float\n- Observed reward for the chosen action and position.\n-\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- \"\"\"\n- self.n_trial += 1\n- self.action_counts[action] += 1\n- # update the 
inverse matrix by the Woodbury formula\n- self.A_inv_temp[action] -= (\n- self.A_inv_temp[action]\n- @ context.T\n- @ context\n- @ self.A_inv_temp[action]\n- / (1 + context @ self.A_inv_temp[action] @ context.T)[0][0]\n- )\n- self.b_temp[:, action] += reward * context.flatten()\n- if self.n_trial % self.batch_size == 0:\n- self.A_inv, self.b = (\n- np.copy(self.A_inv_temp),\n- np.copy(self.b_temp),\n- )\n-\n@dataclass\n-class LinUCB(BaseContextualPolicy):\n+class LinUCB(BaseLinPolicy):\n\"\"\"Linear Upper Confidence Bound.\nParameters\n@@ -178,16 +210,6 @@ class LinUCB(BaseContextualPolicy):\nself.policy_name = f\"linear_ucb_{self.epsilon}\"\nsuper().__post_init__()\n- self.theta_hat = np.zeros((self.dim, self.n_actions))\n- self.A_inv = np.concatenate(\n- [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n- ).reshape(self.n_actions, self.dim, self.dim)\n- self.b = np.zeros((self.dim, self.n_actions))\n-\n- self.A_inv_temp = np.concatenate(\n- [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n- ).reshape(self.n_actions, self.dim, self.dim)\n- self.b_temp = np.zeros((self.dim, self.n_actions))\ndef select_action(self, context: np.ndarray) -> np.ndarray:\n\"\"\"Select action for new data.\n@@ -224,40 +246,9 @@ class LinUCB(BaseContextualPolicy):\nucb_scores = (context @ self.theta_hat + self.epsilon * sigma_hat).flatten()\nreturn ucb_scores.argsort()[::-1][: self.len_list]\n- def update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n- \"\"\"Update policy parameters.\n-\n- Parameters\n- ----------\n- action: int\n- Selected action by the policy.\n-\n- reward: float\n- Observed reward for the chosen action and position.\n-\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- \"\"\"\n- self.n_trial += 1\n- self.action_counts[action] += 1\n- # update the inverse matrix by the Woodbury formula\n- self.A_inv_temp[action] -= (\n- self.A_inv_temp[action]\n- @ context.T\n- @ context\n- @ self.A_inv_temp[action]\n- / (1 + context @ self.A_inv_temp[action] @ context.T)[0][0]\n- )\n- self.b_temp[:, action] += reward * context.flatten()\n- if self.n_trial % self.batch_size == 0:\n- self.A_inv, self.b = (\n- np.copy(self.A_inv_temp),\n- np.copy(self.b_temp),\n- )\n-\n-class LinTS(BaseContextualPolicy):\n+@dataclass\n+class LinTS(BaseLinPolicy):\n\"\"\"Linear Thompson Sampling.\nParameters\n@@ -285,15 +276,6 @@ class LinTS(BaseContextualPolicy):\nself.policy_name = \"linear_ts\"\nsuper().__post_init__()\n- self.A_inv = np.concatenate(\n- [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n- ).reshape(self.n_actions, self.dim, self.dim)\n- self.b = np.zeros((self.dim, self.n_actions))\n-\n- self.A_inv_temp = np.concatenate(\n- [np.identity(self.dim) for _ in np.arange(self.n_actions)]\n- ).reshape(self.n_actions, self.dim, self.dim)\n- self.b_temp = np.zeros((self.dim, self.n_actions))\ndef select_action(self, context: np.ndarray) -> np.ndarray:\n\"\"\"Select action for new data.\n@@ -309,7 +291,7 @@ class LinTS(BaseContextualPolicy):\nList of selected actions.\n\"\"\"\n- theta_hat = np.concatenate(\n+ self.theta_hat = np.concatenate(\n[\nself.A_inv[i] @ self.b[:, i][:, np.newaxis]\nfor i in np.arange(self.n_actions)\n@@ -318,7 +300,7 @@ class LinTS(BaseContextualPolicy):\n)\ntheta_sampled = np.concatenate(\n[\n- self.random_.multivariate_normal(theta_hat[:, i], self.A_inv[i])[\n+ self.random_.multivariate_normal(self.theta_hat[:, i], self.A_inv[i])[\n:, np.newaxis\n]\nfor i in np.arange(self.n_actions)\n@@ -328,35 +310,3 @@ 
class LinTS(BaseContextualPolicy):\npredicted_rewards = (context @ theta_sampled).flatten()\nreturn predicted_rewards.argsort()[::-1][: self.len_list]\n-\n- def update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n- \"\"\"Update policy parameters.\n-\n- Parameters\n- ----------\n- action: int\n- Selected action by the policy.\n-\n- reward: float\n- Observed reward for the chosen action and position.\n-\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- \"\"\"\n- self.n_trial += 1\n- self.action_counts[action] += 1\n- # update the inverse matrix by the Woodbury formula\n- self.A_inv_temp[action] -= (\n- self.A_inv_temp[action]\n- @ context.T\n- @ context\n- @ self.A_inv_temp[action]\n- / (1 + context @ self.A_inv_temp[action] @ context.T)[0][0]\n- )\n- self.b_temp[:, action] += reward * context.flatten()\n- if self.n_trial % self.batch_size == 0:\n- self.A_inv, self.b = (\n- np.copy(self.A_inv_temp),\n- np.copy(self.b_temp),\n- )\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/logistic.py",
"new_path": "obp/policy/logistic.py",
"diff": "@@ -14,11 +14,11 @@ from ..utils import sigmoid\n@dataclass\n-class LogisticEpsilonGreedy(BaseContextualPolicy):\n- \"\"\"Logistic Epsilon Greedy.\n+class BaseLogisticPolicy(BaseContextualPolicy):\n+ \"\"\"Base class for contextual bandit policies using logistic regression.\nParameters\n- -----------\n+ ----------\ndim: int\nNumber of dimensions of context vectors.\n@@ -41,54 +41,35 @@ class LogisticEpsilonGreedy(BaseContextualPolicy):\nrandom_state: int, default=None\nControls the random seed in sampling actions.\n- epsilon: float, default=0.\n- Exploration hyperparameter that must take value in the range of [0., 1.].\n-\n\"\"\"\n- epsilon: float = 0.0\n+ alpha_: float = 1.0\n+ lambda_: float = 1.0\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not 0 <= self.epsilon <= 1:\n+ super().__post_init__()\n+ if not isinstance(self.alpha_, float) or self.alpha_ <= 0.0:\nraise ValueError(\n- f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ f\"alpha_ should be a positive float, but {self.alpha_} is given\"\n)\n- self.policy_name = f\"logistic_egreedy_{self.epsilon}\"\n- super().__post_init__()\n+ if not isinstance(self.lambda_, float) or self.lambda_ <= 0.0:\n+ raise ValueError(\n+ f\"lambda_ should be a positive float, but {self.lambda_} is given\"\n+ )\n+\n+ self.alpha_list = self.alpha_ * np.ones(self.n_actions)\n+ self.lambda_list = self.lambda_ * np.ones(self.n_actions)\nself.model_list = [\nMiniBatchLogisticRegression(\n- lambda_=self.lambda_list[i], alpha=self.alpha_list[i], dim=self.dim\n+ lambda_=self.lambda_list[i],\n+ alpha=self.alpha_list[i],\n+ dim=self.dim,\n+ random_state=self.random_state,\n)\nfor i in np.arange(self.n_actions)\n]\n- self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n- self.context_lists = [[] for _ in np.arange(self.n_actions)]\n-\n- def select_action(self, context: np.ndarray) -> np.ndarray:\n- \"\"\"Select action for new data.\n-\n- Parameters\n- ----------\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- Returns\n- ----------\n- selected_actions: array-like, shape (len_list, )\n- List of selected actions.\n-\n- \"\"\"\n- if self.random_.rand() > self.epsilon:\n- theta = np.array(\n- [model.predict_proba(context) for model in self.model_list]\n- ).flatten()\n- return theta.argsort()[::-1][: self.len_list]\n- else:\n- return self.random_.choice(\n- self.n_actions, size=self.len_list, replace=False\n- )\ndef update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n\"\"\"Update policy parameters.\n@@ -121,11 +102,11 @@ class LogisticEpsilonGreedy(BaseContextualPolicy):\n@dataclass\n-class LogisticUCB(BaseContextualPolicy):\n- \"\"\"Logistic Upper Confidence Bound.\n+class LogisticEpsilonGreedy(BaseLogisticPolicy):\n+ \"\"\"Logistic Epsilon Greedy.\nParameters\n- ------------\n+ -----------\ndim: int\nNumber of dimensions of context vectors.\n@@ -139,15 +120,85 @@ class LogisticUCB(BaseContextualPolicy):\nbatch_size: int, default=1\nNumber of samples used in a batch parameter update.\n+ random_state: int, default=None\n+ Controls the random seed in sampling actions.\n+\nalpha_: float, default=1.\nPrior parameter for the online logistic regression.\nlambda_: float, default=1.\nRegularization hyperparameter for the online logistic regression.\n+ epsilon: float, default=0.\n+ Exploration hyperparameter that must take value in the range of [0., 1.].\n+\n+ \"\"\"\n+\n+ epsilon: float = 0.0\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize 
class.\"\"\"\n+ if not 0 <= self.epsilon <= 1:\n+ raise ValueError(\n+ f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n+ )\n+ self.policy_name = f\"logistic_egreedy_{self.epsilon}\"\n+\n+ super().__post_init__()\n+\n+ def select_action(self, context: np.ndarray) -> np.ndarray:\n+ \"\"\"Select action for new data.\n+\n+ Parameters\n+ ----------\n+ context: array-like, shape (1, dim_context)\n+ Observed context vector.\n+\n+ Returns\n+ ----------\n+ selected_actions: array-like, shape (len_list, )\n+ List of selected actions.\n+\n+ \"\"\"\n+ if self.random_.rand() > self.epsilon:\n+ theta = np.array(\n+ [model.predict_proba(context) for model in self.model_list]\n+ ).flatten()\n+ return theta.argsort()[::-1][: self.len_list]\n+ else:\n+ return self.random_.choice(\n+ self.n_actions, size=self.len_list, replace=False\n+ )\n+\n+\n+@dataclass\n+class LogisticUCB(BaseLogisticPolicy):\n+ \"\"\"Logistic Upper Confidence Bound.\n+\n+ Parameters\n+ ------------\n+ dim: int\n+ Number of dimensions of context vectors.\n+\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default=1\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ batch_size: int, default=1\n+ Number of samples used in a batch parameter update.\n+\nrandom_state: int, default=None\nControls the random seed in sampling actions.\n+ alpha_: float, default=1.\n+ Prior parameter for the online logistic regression.\n+\n+ lambda_: float, default=1.\n+ Regularization hyperparameter for the online logistic regression.\n+\nepsilon: float, default=0.\nExploration hyperparameter that must take value in the range of [0., 1.].\n@@ -169,14 +220,6 @@ class LogisticUCB(BaseContextualPolicy):\nself.policy_name = f\"logistic_ucb_{self.epsilon}\"\nsuper().__post_init__()\n- self.model_list = [\n- MiniBatchLogisticRegression(\n- lambda_=self.lambda_list[i], alpha=self.alpha_list[i], dim=self.dim\n- )\n- for i in np.arange(self.n_actions)\n- ]\n- self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n- self.context_lists = [[] for _ in np.arange(self.n_actions)]\ndef select_action(self, context: np.ndarray) -> np.ndarray:\n\"\"\"Select action for new data.\n@@ -204,38 +247,9 @@ class LogisticUCB(BaseContextualPolicy):\nucb_score = theta + self.epsilon * std\nreturn ucb_score.argsort()[::-1][: self.len_list]\n- def update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n- \"\"\"Update policy parameters.\n-\n- Parameters\n- ------------\n- action: int\n- Selected action by the policy.\n-\n- reward: float\n- Observed reward for the chosen action and position.\n-\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- \"\"\"\n- self.n_trial += 1\n- self.action_counts[action] += 1\n- self.reward_lists[action].append(reward)\n- self.context_lists[action].append(context)\n- if self.n_trial % self.batch_size == 0:\n- for action, model in enumerate(self.model_list):\n- if not len(self.reward_lists[action]) == 0:\n- model.fit(\n- X=np.concatenate(self.context_lists[action], axis=0),\n- y=np.array(self.reward_lists[action]),\n- )\n- self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n- self.context_lists = [[] for _ in np.arange(self.n_actions)]\n-\n@dataclass\n-class LogisticTS(BaseContextualPolicy):\n+class LogisticTS(BaseLogisticPolicy):\n\"\"\"Logistic Thompson Sampling.\nParameters\n@@ -253,15 +267,15 @@ class LogisticTS(BaseContextualPolicy):\nbatch_size: int, default=1\nNumber of samples used in a 
batch parameter update.\n+ random_state: int, default=None\n+ Controls the random seed in sampling actions.\n+\nalpha_: float, default=1.\nPrior parameter for the online logistic regression.\nlambda_: float, default=1.\nRegularization hyperparameter for the online logistic regression.\n- random_state: int, default=None\n- Controls the random seed in sampling actions.\n-\nReferences\n----------\nOlivier Chapelle and Lihong Li.\n@@ -274,17 +288,6 @@ class LogisticTS(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\nsuper().__post_init__()\n- self.model_list = [\n- MiniBatchLogisticRegression(\n- lambda_=self.lambda_list[i],\n- alpha=self.alpha_list[i],\n- dim=self.dim,\n- random_state=self.random_state,\n- )\n- for i in np.arange(self.n_actions)\n- ]\n- self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n- self.context_lists = [[] for _ in np.arange(self.n_actions)]\ndef select_action(self, context: np.ndarray) -> np.ndarray:\n\"\"\"Select action for new data.\n@@ -305,35 +308,6 @@ class LogisticTS(BaseContextualPolicy):\n).flatten()\nreturn theta.argsort()[::-1][: self.len_list]\n- def update_params(self, action: int, reward: float, context: np.ndarray) -> None:\n- \"\"\"Update policy parameters.\n-\n- Parameters\n- ----------\n- action: int\n- Selected action by the policy.\n-\n- reward: float\n- Observed reward for the chosen action and position.\n-\n- context: array-like, shape (1, dim_context)\n- Observed context vector.\n-\n- \"\"\"\n- self.n_trial += 1\n- self.action_counts[action] += 1\n- self.reward_lists[action].append(reward)\n- self.context_lists[action].append(context)\n- if self.n_trial % self.batch_size == 0:\n- for action, model in enumerate(self.model_list):\n- if not len(self.reward_lists[action]) == 0:\n- model.fit(\n- X=np.concatenate(self.context_lists[action], axis=0),\n- y=np.array(self.reward_lists[action]),\n- )\n- self.reward_lists = [[] for _ in np.arange(self.n_actions)]\n- self.context_lists = [[] for _ in np.arange(self.n_actions)]\n-\n@dataclass\nclass MiniBatchLogisticRegression:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | Refactoring: add subclasses for linear and logistic policies |
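The refactoring recorded above moves the per-action model list and the mini-batch `update_params` bookkeeping out of `LogisticUCB` and `LogisticTS` into a shared `BaseLogisticPolicy`, leaving the subclasses to implement only `select_action`. A minimal usage sketch of the resulting interface, with constructor arguments taken from the docstrings in the diff and toy context/reward values that are purely illustrative:

```python
import numpy as np
from obp.policy import LogisticUCB

# arguments follow the docstring in the diff: dim, n_actions, len_list, batch_size, random_state, ...
policy = LogisticUCB(dim=5, n_actions=10, len_list=3, batch_size=10, random_state=12345)

context = np.random.normal(size=(1, 5))            # one round of context, shape (1, dim)
selected_actions = policy.select_action(context)   # top-`len_list` actions ranked by UCB score
# the shared base class buffers (action, reward, context) and refits the per-action
# online logistic models every `batch_size` trials
policy.update_params(action=selected_actions[0], reward=1.0, context=context)
```

The same calling pattern applies to `LogisticTS` and `LogisticEpsilonGreedy`, which per the diff now differ only in how `select_action` ranks actions.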
641,005 | 02.05.2021 13:46:27 | -32,400 | 73bf6f1072f59bcdc229127eff9a176ee3a6c8b4 | Enable using a dict of estimated_rewards_by_reg_model | [
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/benchmark_off_policy_estimators.py",
"new_path": "benchmark/ope/benchmark_off_policy_estimators.py",
"diff": "@@ -26,6 +26,7 @@ ope_estimators = [\nInverseProbabilityWeighting(),\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\n+ DoublyRobust(estimator_name=\"mrdr\"),\nSelfNormalizedDoublyRobust(),\nSwitchDoublyRobust(tau=5.0, estimator_name=\"switch-dr (tau=5)\"),\nSwitchDoublyRobust(tau=10.0, estimator_name=\"switch-dr (tau=10)\"),\n@@ -177,12 +178,18 @@ if __name__ == \"__main__\":\nfor key_ in [\"context\", \"action\", \"reward\", \"pscore\", \"position\"]:\nbandit_feedback[key_] = bandit_feedback[key_][~is_for_reg_model]\n# estimate the mean reward function using the pre-trained reg_model\n- estimated_rewards_by_reg_model = reg_model.predict(\n+ estimated_rewards_by_reg_model_default = reg_model.predict(\ncontext=bandit_feedback[\"context\"],\n)\nestimated_rewards_by_reg_model_mrdr = reg_model_mrdr.predict(\ncontext=bandit_feedback[\"context\"],\n)\n+ estimated_rewards_by_reg_model = {\n+ estimator.estimator_name: estimated_rewards_by_reg_model_mrdr\n+ if estimator.estimator_name == \"mrdr\"\n+ else estimated_rewards_by_reg_model_default\n+ for estimator in ope_estimators\n+ }\n# evaluate the estimation performance of OPE estimators\nope = OffPolicyEvaluation(\nbandit_feedback=bandit_feedback,\n@@ -196,11 +203,6 @@ if __name__ == \"__main__\":\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\n- relative_ee_b[\"mrdr\"] = ope.evaluate_performance_of_estimators(\n- ground_truth_policy_value=ground_truth_policy_value,\n- action_dist=action_dist,\n- estimated_rewards_by_reg_model=estimated_rewards_by_reg_model_mrdr,\n- )[\"dr\"]\nreturn relative_ee_b\n@@ -210,8 +212,7 @@ if __name__ == \"__main__\":\n)([delayed(process)(i) for i in np.arange(n_runs)])\n# save results of the evaluation of ope in './logs' directory.\n- estimator_names = [est.estimator_name for est in ope_estimators] + [\"mrdr\"]\n- relative_ee = {est: np.zeros(n_runs) for est in estimator_names}\n+ relative_ee = {est.estimator_name: np.zeros(n_runs) for est in ope_estimators}\nfor b, relative_ee_b in enumerate(processed):\nfor (\nestimator_name,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "\"\"\"Off-Policy Evaluation Class to Streamline OPE.\"\"\"\nfrom dataclasses import dataclass\nfrom logging import getLogger\n-from typing import Dict, List, Optional, Tuple\n+from typing import Dict, List, Optional, Tuple, Union\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n@@ -90,8 +90,10 @@ class OffPolicyEvaluation:\ndef _create_estimator_inputs(\nself,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n- ) -> Dict[str, np.ndarray]:\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\n+ ) -> Dict[str, Dict[str, np.ndarray]]:\n\"\"\"Create input dictionary to estimate policy value by subclasses of `BaseOffPolicyEstimator`\"\"\"\nif not isinstance(action_dist, np.ndarray):\nraise ValueError(\"action_dist must be ndarray\")\n@@ -103,16 +105,41 @@ class OffPolicyEvaluation:\nlogger.warning(\n\"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n)\n+ elif isinstance(estimated_rewards_by_reg_model, dict):\n+ for estimator_name, value in estimated_rewards_by_reg_model.items():\n+ if value is None:\n+ raise ValueError(\n+ f\"estimated_rewards_by_reg_model[{estimator_name}] must be ndarray\"\n+ )\n+ elif value.shape != action_dist.shape:\n+ raise ValueError(\n+ f\"estimated_rewards_by_reg_model[{estimator_name}].shape must be the same as action_dist.shape\"\n+ )\nelif estimated_rewards_by_reg_model.shape != action_dist.shape:\nraise ValueError(\n\"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\"\n)\nestimator_inputs = {\n+ estimator_name: {\ninput_: self.bandit_feedback[input_]\nfor input_ in [\"reward\", \"action\", \"position\", \"pscore\"]\n}\n- estimator_inputs[\"action_dist\"] = action_dist\n- estimator_inputs[\n+ for estimator_name in self.ope_estimators_\n+ }\n+\n+ for estimator_name in self.ope_estimators_:\n+ estimator_inputs[estimator_name][\"action_dist\"] = action_dist\n+ if isinstance(estimated_rewards_by_reg_model, dict):\n+ if estimator_name in estimated_rewards_by_reg_model:\n+ estimator_inputs[estimator_name][\n+ \"estimated_rewards_by_reg_model\"\n+ ] = estimated_rewards_by_reg_model[estimator_name]\n+ else:\n+ estimator_inputs[estimator_name][\n+ \"estimated_rewards_by_reg_model\"\n+ ] = None\n+ else:\n+ estimator_inputs[estimator_name][\n\"estimated_rewards_by_reg_model\"\n] = estimated_rewards_by_reg_model\n@@ -121,7 +148,9 @@ class OffPolicyEvaluation:\ndef estimate_policy_values(\nself,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\n) -> Dict[str, float]:\n\"\"\"Estimate policy value of an evaluation policy.\n@@ -130,9 +159,11 @@ class OffPolicyEvaluation:\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n- When None is given, model-dependent estimators such as DM and DR cannot be used.\n+ When an array-like is given, all OPE estimators use it.\n+ When a dict is given, 
if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When it is not given, model-dependent estimators such as DM and DR cannot be used.\nReturns\n----------\n@@ -147,7 +178,7 @@ class OffPolicyEvaluation:\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\npolicy_value_dict[estimator_name] = estimator.estimate_policy_value(\n- **estimator_inputs\n+ **estimator_inputs[estimator_name]\n)\nreturn policy_value_dict\n@@ -155,7 +186,9 @@ class OffPolicyEvaluation:\ndef estimate_intervals(\nself,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n@@ -167,8 +200,10 @@ class OffPolicyEvaluation:\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ When an array-like is given, all OPE estimators use it.\n+ When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\nalpha: float, default=0.05\n@@ -199,7 +234,7 @@ class OffPolicyEvaluation:\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\npolicy_value_interval_dict[estimator_name] = estimator.estimate_interval(\n- **estimator_inputs,\n+ **estimator_inputs[estimator_name],\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -210,7 +245,9 @@ class OffPolicyEvaluation:\ndef summarize_off_policy_estimates(\nself,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n@@ -222,8 +259,10 @@ class OffPolicyEvaluation:\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ When an array-like is given, all OPE estimators use it.\n+ When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\nalpha: float, default=0.05\n@@ -273,7 +312,9 @@ class OffPolicyEvaluation:\ndef visualize_off_policy_estimates(\nself,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\nalpha: float = 
0.05,\nis_relative: bool = False,\nn_bootstrap_samples: int = 100,\n@@ -288,8 +329,10 @@ class OffPolicyEvaluation:\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ When an array-like is given, all OPE estimators use it.\n+ When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\nalpha: float, default=0.05\n@@ -326,7 +369,7 @@ class OffPolicyEvaluation:\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_round_rewards_dict[\nestimator_name\n- ] = estimator._estimate_round_rewards(**estimator_inputs)\n+ ] = estimator._estimate_round_rewards(**estimator_inputs[estimator_name])\nestimated_round_rewards_df = DataFrame(estimated_round_rewards_dict)\nestimated_round_rewards_df.rename(\ncolumns={key: key.upper() for key in estimated_round_rewards_dict.keys()},\n@@ -358,7 +401,9 @@ class OffPolicyEvaluation:\nself,\nground_truth_policy_value: float,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\nmetric: str = \"relative-ee\",\n) -> Dict[str, float]:\n\"\"\"Evaluate estimation performances of OPE estimators.\n@@ -387,8 +432,10 @@ class OffPolicyEvaluation:\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n- estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list) or Dict[str, array-like], default=None\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+ When an array-like is given, all OPE estimators use it.\n+ When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\nmetric: str, default=\"relative-ee\"\n@@ -421,7 +468,9 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\n- estimated_policy_value = estimator.estimate_policy_value(**estimator_inputs)\n+ estimated_policy_value = estimator.estimate_policy_value(\n+ **estimator_inputs[estimator_name]\n+ )\nif metric == \"relative-ee\":\nrelative_ee_ = estimated_policy_value - ground_truth_policy_value\nrelative_ee_ /= ground_truth_policy_value\n@@ -435,7 +484,9 @@ class OffPolicyEvaluation:\nself,\nground_truth_policy_value: float,\naction_dist: np.ndarray,\n- estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[\n+ Union[np.ndarray, Dict[str, np.ndarray]]\n+ ] = None,\nmetric: str = \"relative-ee\",\n) -> DataFrame:\n\"\"\"Summarize performance comparisons of OPE estimators.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta.py",
"new_path": "tests/ope/test_meta.py",
"diff": "@@ -317,6 +317,17 @@ invalid_input_of_create_estimator_inputs = [\nnp.zeros((2, 3, 3)),\n\"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\",\n),\n+ (\n+ np.zeros((2, 3, 4)),\n+ {\"dm\": np.zeros((2, 3, 3))},\n+ \"estimated_rewards_by_reg_model\\[dm\\].shape must be the same as action_dist.shape\",\n+ ),\n+ (\n+ np.zeros((2, 3, 4)),\n+ {\"dm\": None},\n+ \"estimated_rewards_by_reg_model\\[dm\\] must be ndarray\",\n+ ),\n+ (np.zeros((2, 3)), None, \"action_dist.ndim must be 3-dimensional\"),\n(np.zeros((2, 3)), None, \"action_dist.ndim must be 3-dimensional\"),\n(\"3\", None, \"action_dist must be ndarray\"),\n(None, None, \"action_dist must be ndarray\"),\n@@ -328,6 +339,11 @@ valid_input_of_create_estimator_inputs = [\nnp.zeros((2, 3, 4)),\n\"same shape\",\n),\n+ (\n+ np.zeros((2, 3, 4)),\n+ {\"dm\": np.zeros((2, 3, 4))},\n+ \"same shape\",\n+ ),\n(np.zeros((2, 3, 1)), None, \"estimated_rewards_by_reg_model is None\"),\n]\n@@ -404,7 +420,8 @@ def test_meta_create_estimator_inputs_using_valid_input_data(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\n- assert set(estimator_inputs.keys()) == set(\n+ assert set(estimator_inputs.keys()) == set([\"ipw\"])\n+ assert set(estimator_inputs[\"ipw\"].keys()) == set(\n[\n\"reward\",\n\"action\",\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | Enable to use a dict of estimated_rewards_by_regmodel |
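With the change recorded above, `estimated_rewards_by_reg_model` may be either a single array shared by all estimators or a dict keyed by `estimator_name`, which is how the benchmark script now routes the MRDR-specific regression model only to the estimator named "mrdr". A hedged, self-contained sketch of the new calling convention; the toy feedback arrays and q-hat values below are made up and only illustrate the expected shapes `(n_rounds, n_actions, len_list)`:

```python
import numpy as np
from obp.ope import OffPolicyEvaluation, InverseProbabilityWeighting, DoublyRobust

# toy logged data: n_rounds=2, n_actions=3, len_list=1 (illustrative values only)
bandit_feedback = dict(
    action=np.array([0, 2]),
    reward=np.array([0, 1]),
    position=np.zeros(2, dtype=int),
    pscore=np.full(2, 1 / 3),
)
action_dist = np.full((2, 3, 1), 1 / 3)          # evaluation policy (uniform, for illustration)
q_hat_default = np.random.uniform(size=(2, 3, 1))
q_hat_mrdr = np.random.uniform(size=(2, 3, 1))

ope = OffPolicyEvaluation(
    bandit_feedback=bandit_feedback,
    ope_estimators=[
        InverseProbabilityWeighting(),
        DoublyRobust(),                          # estimator_name defaults to "dr"
        DoublyRobust(estimator_name="mrdr"),
    ],
)
# a dict keyed by estimator_name sends a different q-hat to "mrdr"; estimators whose
# name is missing from the dict (here "ipw") simply receive None and ignore it
print(ope.estimate_policy_values(
    action_dist=action_dist,
    estimated_rewards_by_reg_model={"dr": q_hat_default, "mrdr": q_hat_mrdr},
))
```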
641,011 | 05.05.2021 20:39:35 | -32,400 | 0b0df8cafa0f2fe80376f4e4ab2c86f3dd90fdf2 | fix pscore_item_position calculation | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -376,7 +376,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nfor action_list in permutations(\nnp.arange(self.n_unique_action), self.len_list\n):\n- if sampled_action_index not in action_list:\n+ if sampled_action_index != action_list[position_]:\ncontinue\npscore_item_position_i_l += self.calc_item_position_pscore(\naction_list=action_list,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix pscore_item_position calculation |
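The one-line fix recorded above makes the marginal item-position pscore sum joint probabilities only over permutations that place the sampled action at that exact slot, rather than over every permutation that merely contains it somewhere. A small self-contained sketch of that marginalization; the uniform joint distribution here is a made-up toy, not the dataset's behavior policy:

```python
from itertools import permutations

def marginal_pscore(a: int, k: int, joint_pscore, n_unique_action: int, len_list: int) -> float:
    """Marginal probability that action `a` is shown at slot `k` under a joint slate policy."""
    return sum(
        joint_pscore(perm)
        for perm in permutations(range(n_unique_action), len_list)
        if perm[k] == a  # corrected condition: `a` must sit exactly at slot `k`
    )

# toy uniform joint policy over the 3P2 = 6 ordered lists of length 2 drawn from 3 actions
print(marginal_pscore(a=0, k=1, joint_pscore=lambda perm: 1 / 6, n_unique_action=3, len_list=2))  # 1/3
```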
641,011 | 06.05.2021 11:52:27 | -32,400 | 57cae57736f00a8f4887c4d3ac41a6544b880b34 | rm return_exact_uniform_pscore_item_position | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -75,7 +75,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\nFunction generating logit value of each action in action space,\ni.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n- If None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\n+ If None is set, context **independent** uniform distribution will be used (uniform behavior policy).\nrandom_state: int, default=12345\nControls the random seed in sampling synthetic slate bandit dataset.\n@@ -242,7 +242,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nelse:\nself.action_interaction_matrix = np.identity(self.len_list)\nif self.behavior_policy_function is None:\n- self.behavior_policy = np.ones(self.n_unique_action) / self.n_unique_action\n+ self.uniform_behavior_policy = (\n+ np.ones(self.n_unique_action) / self.n_unique_action\n+ )\nif self.reward_type == \"continuous\":\nself.reward_min = 0\nself.reward_max = 1e10\n@@ -295,7 +297,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_: np.ndarray,\nn_rounds: int,\nreturn_pscore_item_position: bool = True,\n- return_exact_uniform_pscore_item_position: bool = False,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n\"\"\"Sample action and obtain pscores.\n@@ -311,10 +312,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen n_actions and len_list are large, giving True to this parameter may lead to a large computational time.\n- return_exact_uniform_pscore_item_position: bool, default=False\n- A boolean parameter whether `pscore_item_position` of uniform random policy is returned or not.\n- When True is given, actions are sampled by the uniform random behavior policy.\n-\nReturns\n----------\naction: array-like, shape (n_unique_action * len_list)\n@@ -367,7 +364,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n# calculate marginal pscore\nif return_pscore_item_position:\n- if return_exact_uniform_pscore_item_position:\n+ if self.behavior_policy_function is None: # uniform random\npscore_item_position_i_l = 1 / self.n_unique_action\nelif position_ == 0:\npscore_item_position_i_l = pscore_i\n@@ -452,7 +449,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds: int,\ntau: Union[int, float] = 1.0,\nreturn_pscore_item_position: bool = True,\n- return_exact_uniform_pscore_item_position: bool = False,\n) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -469,11 +465,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen `n_unique_action` and `len_list` are large, this parameter should be set to False because of the computational time.\n- return_exact_uniform_pscore_item_position: bool, default=False\n- A boolean parameter whether `pscore_item_position` of uniform random policy is returned or not.\n- When using uniform random policy, this parameter should be set to True.\n-\n-\nReturns\n---------\nbandit_feedback: BanditFeedback\n@@ -484,18 +475,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"n_rounds must be a positive integer, but {n_rounds} is given\"\n)\n- if (\n- return_exact_uniform_pscore_item_position\n- and self.behavior_policy_function is not None\n- ):\n- raise ValueError(\n- \"when 
return_exact_uniform_pscore_item_position is True, behavior_policy_function must not be specified (must be random)\"\n- )\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\nif self.behavior_policy_function is None:\n- behavior_policy_logit_ = np.tile(self.behavior_policy, (n_rounds, 1))\n+ behavior_policy_logit_ = np.tile(\n+ self.uniform_behavior_policy, (n_rounds, 1)\n+ )\nelse:\nbehavior_policy_logit_ = self.behavior_policy_function(\ncontext=context,\n@@ -518,7 +504,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_=behavior_policy_logit_,\nn_rounds=n_rounds,\nreturn_pscore_item_position=return_pscore_item_position,\n- return_exact_uniform_pscore_item_position=return_exact_uniform_pscore_item_position,\n)\n# sample expected reward factual\nif self.base_reward_function is None:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | rm return_exact_uniform_pscore_item_position |
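After the change recorded above, constructing the dataset without a `behavior_policy_function` makes it fall back to a uniform behavior policy internally, and the marginal item-position pscore is then exactly `1 / n_unique_action` without any extra flag. A hedged usage sketch; the import path and the reliance on default constructor arguments (such as the default reward structure) are assumptions based on the accompanying tests:

```python
import numpy as np
from obp.dataset import SyntheticSlateBanditDataset

dataset = SyntheticSlateBanditDataset(
    n_unique_action=5, len_list=3, dim_context=2, reward_type="binary", random_state=12345
)  # no behavior_policy_function -> uniform behavior policy
feedback = dataset.obtain_batch_bandit_feedback(n_rounds=100, return_pscore_item_position=True)
assert np.allclose(feedback["pscore_item_position"], 1 / 5)  # exactly 1 / n_unique_action
```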
641,006 | 16.05.2021 17:02:06 | -32,400 | b0ff440712d3bd240fc549dec3b218a01b785711 | add generate_evaluation_policy_pscore and calc_on_policy_value | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "from dataclasses import dataclass\nfrom typing import Optional, Callable, Tuple, Union, List\nfrom itertools import permutations\n+from math import factorial\nimport numpy as np\nfrom scipy.stats import truncnorm\n@@ -318,19 +319,19 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nReturns\n----------\n- action: array-like, shape (n_unique_action * len_list)\n+ action: array-like, shape (n_rounds * len_list)\nActions sampled by a behavior policy.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`]\n- pscore_cascade: array-like, shape (n_unique_action * len_list)\n+ pscore_cascade: array-like, shape (n_rounds * len_list)\nJoint action choice probabilities above the slot (:math:`k`) in each slate given context (:math:`x`).\ni.e., :math:`\\\\pi_k: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{k})`.\n- pscore: array-like, shape (n_unique_action * len_list)\n+ pscore: array-like, shape (n_rounds * len_list)\nJoint action choice probabilities of the slate given context (:math:`x`).\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{\\\\text{len_list}})`.\n- pscore_item_position: array-like, shape (n_unique_action * len_list)\n+ pscore_item_position: array-like, shape (n_rounds * len_list)\nMarginal action choice probabilities of each slot given context (:math:`x`).\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n@@ -411,7 +412,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nReturns\n----------\n- reward: array-like, shape (n_unique_action, len_list)\n+ reward: array-like, shape (n_rounds, len_list)\n\"\"\"\nexpected_reward_factual *= self.exam_weight\n@@ -562,6 +563,234 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_item_position=pscore_item_position,\n)\n+ def calc_on_policy_value(self, reward: np.ndarray, slate_id: np.ndarray) -> float:\n+ \"\"\"Calculate the policy value of given reward and slate_id.\n+\n+ Parameters\n+ -----------\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+\n+ slate_id: array-like, shape (<= n_rounds * len_list,)\n+ Slate id observed in each round of the logged bandit feedback.\n+\n+ Returns\n+ ----------\n+ policy_value: float\n+ The policy value of the given reward and slate_id.\n+\n+ \"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(slate_id, np.ndarray):\n+ raise ValueError(\"slate_id must be ndarray\")\n+ if reward.ndim != 1:\n+ raise ValueError(f\"reward must be 1-dimensional, but is {reward.ndim}.\")\n+ if slate_id.ndim != 1:\n+ raise ValueError(f\"slate_id must be 1-dimensional, but is {slate_id.ndim}.\")\n+ if reward.shape[0] != slate_id.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of reward must be the same as that of slate_id\"\n+ )\n+\n+ return reward.sum() / np.unique(slate_id).shape[0]\n+\n+ def generate_evaluation_policy_pscore(\n+ self,\n+ evaluation_policy_type: str,\n+ context: np.ndarray,\n+ random_state: int,\n+ epsilon: Optional[float] = 1.0,\n+ action_2d: Optional[np.ndarray] = None,\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+ \"\"\"Generate pscores of three types of evaluation policies ('random', 'optimal', 'anti-optimal').\n+\n+ Parameters\n+ -----------\n+ evaluation_policy_type: str\n+ Type of evaluation policy, which must be one of 'optimal', 'anti-optimal', or 'random'.\n+ When 'optimal' is given, we 
sort actions by their base expected rewards (outputs of `base_reward_function`) and extract top-L actions (L=`len_list`) for each slate.\n+ When 'anti-optimal' is given, we sort actions by their base expected rewards (outputs of `base_reward_function`) and extract bottom-L actions (L=`len_list`) for each slate.\n+ We calculate three propensity scores of the epsilon-greedy policy.\n+ When 'random' is given, we calculate three propensity scores of the random policy.\n+\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ random_state: int\n+ Controls the random seed in sampling synthetic slate bandit dataset.\n+\n+ epsilon: float, default=1.\n+ Exploration hyperparameter that must take value in the range of [0., 1.].\n+ When evaluation_policy_type is 'random', this argument is unnecessary.\n+\n+ action_2d: array-like, shape (n_rounds, len_list), default=None\n+ Actions sampled by a behavior policy.\n+ Action list of slate `i` is stored in action[`i`].\n+ When bandit_feedback is obtained by `obtain_batch_bandit_feedback`, we can obtain action_2d as follows: bandit_feedback[\"action\"].reshape((n_rounds, len_list))\n+ When evaluation_policy_type is 'random', this argument is unnecessary.\n+\n+\n+ Returns\n+ ----------\n+ pscore: array-like, shape (n_unique_action * len_list)\n+ Joint action choice probabilities of the slate given context (:math:`x`).\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{\\\\text{len_list}})`.\n+\n+ pscore_item_position: array-like, shape (n_unique_action * len_list)\n+ Marginal action choice probabilities of each slot given context (:math:`x`).\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+\n+ pscore_cascade: array-like, shape (n_unique_action * len_list)\n+ Joint action choice probabilities above the slot (:math:`k`) in each slate given context (:math:`x`).\n+ i.e., :math:`\\\\pi_k: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{k})`.\n+\n+ \"\"\"\n+ if evaluation_policy_type not in [\"optimal\", \"anti-optimal\", \"random\"]:\n+ raise ValueError(\n+ f\"evaluation_policy_type must be 'optimal', 'anti-optimal', or 'random', but {evaluation_policy_type} is given\"\n+ )\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ # [Caution]: OverflowError raises when integer division result is too large for a float\n+ cascade_npr = [\n+ factorial(self.n_unique_action) / factorial(self.n_unique_action - x - 1)\n+ for x in np.arange(self.len_list)\n+ ]\n+ random_pscore = np.ones(context.shape[0] * self.len_list) / cascade_npr[-1]\n+ random_pscore_item_position = (\n+ np.ones(context.shape[0] * self.len_list) / self.n_unique_action\n+ )\n+ random_pscore_cascade = 1.0 / np.tile(cascade_npr, context.shape[0])\n+ if evaluation_policy_type == \"random\":\n+ pscore = random_pscore\n+ pscore_item_position = random_pscore_item_position\n+ pscore_cascade = random_pscore_cascade\n+\n+ else:\n+ # base_expected_reward: array-like, shape (n_rounds, n_unique_action)\n+ base_expected_reward = self.base_reward_function(\n+ context=context,\n+ action_context=self.action_context,\n+ random_state=random_state,\n+ )\n+ if not isinstance(action_2d, np.ndarray) or action_2d.ndim != 2:\n+ raise ValueError(\"action_2d must be 2-dimensional ndarray\")\n+ if context.shape[0] != action_2d.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of context must be the 
same as that of action_2d\"\n+ )\n+ if set([np.unique(x).shape[0] for x in action_2d]) != set([self.len_list]):\n+ raise ValueError(\"actions of each slate must not be duplicated\")\n+\n+ check_scalar(\n+ epsilon, name=\"epsilon\", target_type=(float), min_val=0.0, max_val=1.0\n+ )\n+ if evaluation_policy_type == \"optimal\":\n+ sorted_actions = base_expected_reward.argsort(axis=1)[\n+ :, -self.len_list :\n+ ]\n+ (\n+ pscore,\n+ pscore_item_position,\n+ pscore_cascade,\n+ ) = self._calc_epsilon_greedy_pscore(\n+ epsilon=epsilon,\n+ action_2d=action_2d,\n+ sorted_actions=sorted_actions,\n+ random_pscore=random_pscore,\n+ random_pscore_item_position=random_pscore_item_position,\n+ random_pscore_cascade=random_pscore_cascade,\n+ )\n+ else:\n+ sorted_actions = base_expected_reward.argsort(axis=1)[\n+ :, : self.len_list\n+ ]\n+ (\n+ pscore,\n+ pscore_item_position,\n+ pscore_cascade,\n+ ) = self._calc_epsilon_greedy_pscore(\n+ epsilon=epsilon,\n+ action_2d=action_2d,\n+ sorted_actions=sorted_actions,\n+ random_pscore=random_pscore,\n+ random_pscore_item_position=random_pscore_item_position,\n+ random_pscore_cascade=random_pscore_cascade,\n+ )\n+ return pscore, pscore_item_position, pscore_cascade\n+\n+ def _calc_epsilon_greedy_pscore(\n+ self,\n+ epsilon: float,\n+ action_2d: np.ndarray,\n+ sorted_actions: np.ndarray,\n+ random_pscore: np.ndarray,\n+ random_pscore_item_position: np.ndarray,\n+ random_pscore_cascade: np.ndarray,\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+ \"\"\"Calculate pscores given action_2d, sorted_actions, and random pscores.\n+\n+ Parameters\n+ -----------\n+ epsilon: float, default=1.\n+ Exploration hyperparameter that must take value in the range of [0., 1.].\n+ When evaluation_policy_type is 'random', this argument is unnecessary.\n+\n+ action_2d: array-like, shape (n_rounds, len_list), default=None\n+ Actions sampled by a behavior policy.\n+ Action list of slate `i` is stored in action[`i`].\n+ When bandit_feedback is obtained by `obtain_batch_bandit_feedback`, we can obtain action_2d as follows: bandit_feedback[\"action\"].reshape((n_rounds, len_list))\n+ When evaluation_policy_type is 'random', this argument is unnecessary.\n+\n+ random_pscore: array-like, shape (n_unique_action * len_list)\n+ Joint action choice probabilities of the slate given context (:math:`x`) when the evaluation policy is random.\n+ i.e., :math:`\\\\frac{1}{{}_{n} P _r)`, where :math:`n` is `n_unique_actions` and :math:`r` is `len_list`.\n+\n+ random_pscore_item_position: array-like, shape (n_unique_action * len_list)\n+ Marginal action choice probabilities of each slot given context (:math:`x`) when the evaluation policy is random.\n+ i.e., :math:`\\\\frac{1}{n)`, where :math:`n` is `n_unique_actions`.\n+\n+ random_pscore_cascade: array-like, shape (n_unique_action * len_list)\n+ Joint action choice probabilities above the slot (:math:`k`) in each slate given context (:math:`x`) when the evaluation policy is random.\n+ i.e., :math:`\\\\frac{1}{{}_{n} P _k)`, where :math:`n` is `n_unique_actions`.\n+\n+\n+ Returns\n+ ----------\n+ pscore: array-like, shape (n_unique_action * len_list)\n+ Joint action choice probabilities of the slate given context (:math:`x`).\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{\\\\text{len_list}})`.\n+\n+ pscore_item_position: array-like, shape (n_unique_action * len_list)\n+ Marginal action choice probabilities of each slot given context (:math:`x`).\n+ i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow 
\\\\Delta(\\\\mathcal{A})`.\n+\n+ pscore_cascade: array-like, shape (n_unique_action * len_list)\n+ Joint action choice probabilities above the slot (:math:`k`) in each slate given context (:math:`x`).\n+ i.e., :math:`\\\\pi_k: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A}^{k})`.\n+\n+ \"\"\"\n+ if not isinstance(action_2d, np.ndarray) or action_2d.ndim != 2:\n+ raise ValueError(\"action_2d must be 2-dimensional ndarray\")\n+ if set([np.unique(x).shape[0] for x in action_2d]) != set([self.len_list]):\n+ raise ValueError(\"actions of each slate must not be duplicated\")\n+ match_action_flg = sorted_actions == action_2d\n+ pscore_flg = np.repeat(match_action_flg.all(axis=1), self.len_list)\n+ pscore_item_position_flg = match_action_flg.flatten()\n+ pscore_cascade_flg = match_action_flg.cumprod(axis=1).flatten()\n+ # calculate pscores\n+ pscore = pscore_flg * (1 - epsilon) + epsilon * random_pscore\n+ pscore_item_position = (\n+ pscore_item_position_flg * (1 - epsilon)\n+ + epsilon * random_pscore_item_position\n+ )\n+ pscore_cascade = (\n+ pscore_cascade_flg * (1 - epsilon) + epsilon * random_pscore_cascade\n+ )\n+ return pscore, pscore_item_position, pscore_cascade\n+\ndef generate_symmetric_matrix(n_unique_action: int, random_state: int) -> np.ndarray:\n\"\"\"Generate symmetric matrix\n@@ -609,7 +838,7 @@ def action_interaction_additive_reward_function(\naction_context: array-like, shape (n_unique_action, dim_action_context)\nVector representation for each action.\n- action: array-like, shape (n_unique_action * len_list)\n+ action: array-like, shape (n_rounds * len_list)\nSampled action.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`].\n@@ -726,7 +955,7 @@ def action_interaction_exponential_reward_function(\naction_context: array-like, shape (n_unique_action, dim_action_context)\nVector representation for each action.\n- action: array-like, shape (n_unique_action * len_list)\n+ action: array-like, shape (n_rounds * len_list)\nSampled action.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`].\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -711,3 +711,381 @@ def test_synthetic_slate_using_valid_inputs(\nprint(bandit_feedback_df.groupby(\"position\")[\"reward\"].describe())\nif reward_type == \"binary\":\nassert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n+\n+\n+n_rounds = 5\n+len_list = 3\n+# slate_id, reward, description\n+invalid_input_of_calc_true_policy_value = [\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ \"4\", #\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros((n_rounds, len_list), dtype=int), #\n+ \"reward must be 1-dimensional\",\n+ ),\n+ (\n+ \"4\", #\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ \"slate_id must be ndarray\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list).reshape((n_rounds, len_list)), #\n+ np.zeros(n_rounds * len_list, dtype=int),\n+ \"slate_id must be 1-dimensional\",\n+ ),\n+ (\n+ np.repeat(np.arange(n_rounds), len_list),\n+ np.zeros(n_rounds * len_list - 1, dtype=int), #\n+ \"the size of axis 0 of reward must be the same as that of slate_id\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, description\",\n+ invalid_input_of_calc_true_policy_value,\n+)\n+def test_calc_on_policy_value_using_invalid_input_data(\n+ slate_id, reward, description\n+) -> None:\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dataset.calc_on_policy_value(reward=reward, slate_id=slate_id)\n+\n+\n+# slate_id, reward, description\n+valid_input_of_calc_true_policy_value = [\n+ (\n+ np.array([1, 1, 2, 2, 3, 4]),\n+ np.array([0, 1, 1, 0, 0, 0]),\n+ 0.5,\n+ \"4 slate ids\",\n+ ),\n+ (\n+ np.array([1, 1]),\n+ np.array([2, 3]),\n+ 5,\n+ \"one slate id\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"slate_id, reward, result, description\",\n+ valid_input_of_calc_true_policy_value,\n+)\n+def test_calc_on_policy_value_using_valid_input_data(\n+ slate_id, reward, result, description\n+) -> None:\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ )\n+ assert result == dataset.calc_on_policy_value(reward=reward, slate_id=slate_id)\n+\n+\n+# evaluation_policy_type, epsilon, context, action_2d, random_state, err, description\n+invalid_input_of_generate_evaluation_policy_pscore = [\n+ (\n+ \"awesome\", #\n+ 1.0,\n+ np.ones([5, 2]),\n+ np.tile(np.arange(3), 5).reshape((5, 3)),\n+ 1.0,\n+ ValueError,\n+ \"evaluation_policy_type must be\",\n+ ),\n+ (\n+ \"optimal\",\n+ 1.0,\n+ np.array([5, 2]), #\n+ np.tile(np.arange(3), 5).reshape((5, 3)),\n+ 1.0,\n+ ValueError,\n+ \"context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ \"optimal\",\n+ 1.0,\n+ np.ones([5, 2]),\n+ np.ones(5), #\n+ 1,\n+ ValueError,\n+ \"action_2d must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ \"optimal\",\n+ 1.0,\n+ np.ones([5, 2]),\n+ np.random.choice(5), #\n+ 1,\n+ ValueError,\n+ \"action_2d must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ \"optimal\",\n+ 1.0,\n+ np.ones([5, 2]),\n+ np.ones([4, 2]), #\n+ 1,\n+ 
ValueError,\n+ \"the size of axis 0\",\n+ ),\n+ (\n+ \"optimal\",\n+ 1.0,\n+ np.ones([5, 2]),\n+ np.ones([5, 2]), #\n+ 1,\n+ ValueError,\n+ \"actions of each slate must not be duplicated\",\n+ ),\n+ (\n+ \"optimal\",\n+ -1.0, #\n+ np.ones([5, 2]),\n+ np.ones([4, 2]),\n+ 1,\n+ ValueError,\n+ \"\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"evaluation_policy_type, epsilon, context, action_2d, random_state, err, description\",\n+ invalid_input_of_generate_evaluation_policy_pscore,\n+)\n+def test_generate_evaluation_policy_pscore_using_invalid_input_data(\n+ evaluation_policy_type,\n+ epsilon,\n+ context,\n+ action_2d,\n+ random_state,\n+ err,\n+ description,\n+) -> None:\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ base_reward_function=logistic_reward_function,\n+ )\n+ if description == \"\":\n+ with pytest.raises(err):\n+ _ = dataset.generate_evaluation_policy_pscore(\n+ evaluation_policy_type=evaluation_policy_type,\n+ epsilon=epsilon,\n+ context=context,\n+ action_2d=action_2d,\n+ random_state=random_state,\n+ )\n+ else:\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = dataset.generate_evaluation_policy_pscore(\n+ evaluation_policy_type=evaluation_policy_type,\n+ epsilon=epsilon,\n+ context=context,\n+ action_2d=action_2d,\n+ random_state=random_state,\n+ )\n+\n+\n+# evaluation_policy_type, epsilon, description\n+valid_input_of_generate_evaluation_policy_pscore = [\n+ (\n+ \"optimal\",\n+ 0.1,\n+ \"optimal evaluation policy\",\n+ ),\n+ (\n+ \"anti-optimal\",\n+ 0.1,\n+ \"anti-optimal evaluation policy\",\n+ ),\n+ (\n+ \"random\",\n+ None,\n+ \"random evaluation policy\",\n+ ),\n+ (\n+ \"optimal\",\n+ 0.0,\n+ \"optimal evaluation policy, epsilon=0.0 (greedy)\",\n+ ),\n+ (\n+ \"optimal\",\n+ 1.0,\n+ \"optimal evaluation policy, epsilon=1.0 (random)\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"evaluation_policy_type, epsilon, description\",\n+ valid_input_of_generate_evaluation_policy_pscore,\n+)\n+def test_generate_evaluation_policy_pscore_using_valid_input_data(\n+ evaluation_policy_type,\n+ epsilon,\n+ description,\n+) -> None:\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 100\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ base_reward_function=logistic_reward_function,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds, return_pscore_item_position=True\n+ )\n+ # generate pscores\n+ (\n+ pscore,\n+ pscore_item_position,\n+ pscore_cascade,\n+ ) = dataset.generate_evaluation_policy_pscore(\n+ evaluation_policy_type=evaluation_policy_type,\n+ context=bandit_feedback[\"context\"],\n+ random_state=random_state,\n+ epsilon=epsilon,\n+ action_2d=bandit_feedback[\"action\"].reshape((n_rounds, len_list)),\n+ )\n+ if evaluation_policy_type == \"random\" or epsilon == 1.0:\n+ # pscores of random evaluation policy must be the same as those of bandit feedback using random behavior policy\n+ assert np.allclose(pscore, bandit_feedback[\"pscore\"])\n+ assert np.allclose(\n+ pscore_item_position, 
bandit_feedback[\"pscore_item_position\"]\n+ )\n+ assert np.allclose(pscore_cascade, bandit_feedback[\"pscore_cascade\"])\n+ if epsilon == 0.0:\n+ # pscore element of greedy evaluation policy must be either 0 or 1\n+ assert len(set(np.unique(pscore)) - set([0.0, 1.0])) == 0\n+ assert len(set(np.unique(pscore_item_position)) - set([0.0, 1.0])) == 0\n+ assert len(set(np.unique(pscore_cascade)) - set([0.0, 1.0])) == 0\n+\n+\n+# n_unique_action, len_list, epsilon, action_2d, sorted_actions, random_pscore, random_pscore_item_position, random_pscore_cascade, true_pscore, true_pscore_item_position, true_pscore_cascade, description\n+valid_input_of_calc_epsilon_greedy_pscore = [\n+ (\n+ 5,\n+ 3,\n+ 0.1,\n+ np.tile(np.arange(3), 4).reshape((4, 3)),\n+ np.array([[0, 1, 2], [0, 1, 3], [1, 0, 2], [1, 0, 4]]),\n+ np.ones(12) / 60, # 1 / 5P3\n+ np.ones(12) / 5, # 1/ 5\n+ np.tile([1 / 5, 1 / 20, 1 / 60], 4),\n+ np.array(\n+ [[0.9 + 0.1 / 60] * 3, [0.1 / 60] * 3, [0.1 / 60] * 3, [0.1 / 60] * 3]\n+ ).flatten(),\n+ np.array(\n+ [\n+ [0.9 + 0.1 / 5] * 3,\n+ [0.9 + 0.1 / 5, 0.9 + 0.1 / 5, 0.1 / 5],\n+ [0.1 / 5, 0.1 / 5, 0.9 + 0.1 / 5],\n+ [0.1 / 5] * 3,\n+ ]\n+ ).flatten(),\n+ np.array(\n+ [\n+ [0.9 + 0.1 / 5, 0.9 + 0.1 / 20, 0.9 + 0.1 / 60],\n+ [0.9 + 0.1 / 5, 0.9 + 0.1 / 20, 0.1 / 60],\n+ [0.1 / 5, 0.1 / 20, 0.1 / 60],\n+ [0.1 / 5, 0.1 / 20, 0.1 / 60],\n+ ]\n+ ).flatten(),\n+ \"epsolon is 0.1\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_unique_action, len_list, epsilon, action_2d, sorted_actions, random_pscore, random_pscore_item_position, random_pscore_cascade, true_pscore, true_pscore_item_position, true_pscore_cascade, description\",\n+ valid_input_of_calc_epsilon_greedy_pscore,\n+)\n+def test_calc_epsilon_greedy_pscore_using_valid_input_data(\n+ n_unique_action,\n+ len_list,\n+ epsilon,\n+ action_2d,\n+ sorted_actions,\n+ random_pscore,\n+ random_pscore_item_position,\n+ random_pscore_cascade,\n+ true_pscore,\n+ true_pscore_item_position,\n+ true_pscore_cascade,\n+ description,\n+) -> None:\n+ # set parameters\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ base_reward_function=logistic_reward_function,\n+ )\n+ (\n+ pscore,\n+ pscore_item_position,\n+ pscore_cascade,\n+ ) = dataset._calc_epsilon_greedy_pscore(\n+ epsilon=epsilon,\n+ action_2d=action_2d,\n+ sorted_actions=sorted_actions,\n+ random_pscore=random_pscore,\n+ random_pscore_item_position=random_pscore_item_position,\n+ random_pscore_cascade=random_pscore_cascade,\n+ )\n+ assert np.allclose(true_pscore, pscore)\n+ assert np.allclose(true_pscore_item_position, pscore_item_position)\n+ assert np.allclose(true_pscore_cascade, pscore_cascade)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add generate_evaluation_policy_pscore and calc_on_policy_value |
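Two helpers are added in the commit above: `calc_on_policy_value` divides the summed rewards by the number of distinct slates, and the epsilon-greedy pscores mix an indicator for matching the greedy slate with the uniform-random pscores. A tiny worked sketch of both computations, with values taken from (or modeled on) the new tests:

```python
import numpy as np

# on-policy value: total reward divided by the number of distinct slates
reward = np.array([0, 1, 1, 0, 0, 0])
slate_id = np.array([1, 1, 2, 2, 3, 4])
print(reward.sum() / np.unique(slate_id).shape[0])  # 0.5, matching the new test case

# epsilon-greedy joint pscore of a slate that matches the greedy ranking:
# (1 - epsilon) * 1 + epsilon * (1 / nPk), here with n_unique_action=5, len_list=3
epsilon, n_permutations = 0.1, 5 * 4 * 3  # 5P3 = 60
print((1 - epsilon) * 1 + epsilon / n_permutations)  # 0.9 + 0.1/60, as in the parametrized test
```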
641,006 | 16.05.2021 22:49:48 | -32,400 | a781e97296a2a39cde971a1985f96059db5eac77 | fix action_2d to action; fix function name of calc_on_policy_value | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -563,7 +563,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_item_position=pscore_item_position,\n)\n- def calc_on_policy_value(self, reward: np.ndarray, slate_id: np.ndarray) -> float:\n+ def calc_on_policy_policy_value(\n+ self, reward: np.ndarray, slate_id: np.ndarray\n+ ) -> float:\n\"\"\"Calculate the policy value of given reward and slate_id.\nParameters\n@@ -601,7 +603,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ncontext: np.ndarray,\nrandom_state: int,\nepsilon: Optional[float] = 1.0,\n- action_2d: Optional[np.ndarray] = None,\n+ action: Optional[np.ndarray] = None,\n+ slate_id: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\"\"\"Generate pscores of three types of evaluation policies ('random', 'optimal', 'anti-optimal').\n@@ -624,10 +628,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nExploration hyperparameter that must take value in the range of [0., 1.].\nWhen evaluation_policy_type is 'random', this argument is unnecessary.\n- action_2d: array-like, shape (n_rounds, len_list), default=None\n+ action: array-like, shape (n_rounds * len_list,), default=None\nActions sampled by a behavior policy.\n- Action list of slate `i` is stored in action[`i`].\n- When bandit_feedback is obtained by `obtain_batch_bandit_feedback`, we can obtain action_2d as follows: bandit_feedback[\"action\"].reshape((n_rounds, len_list))\n+ Action list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`].\nWhen evaluation_policy_type is 'random', this argument is unnecessary.\n@@ -675,8 +678,15 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_context=self.action_context,\nrandom_state=random_state,\n)\n- if not isinstance(action_2d, np.ndarray) or action_2d.ndim != 2:\n- raise ValueError(\"action_2d must be 2-dimensional ndarray\")\n+ if (\n+ not isinstance(action, np.ndarray)\n+ or action.ndim != 1\n+ or action.shape[0] != context.shape[0] * self.len_list\n+ ):\n+ raise ValueError(\n+ \"action must be 1-dimensional ndarray, shape (n_rounds * len_list)\"\n+ )\n+ action_2d = action.reshape((context.shape[0], self.len_list))\nif context.shape[0] != action_2d.shape[0]:\nraise ValueError(\n\"the size of axis 0 of context must be the same as that of action_2d\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -749,7 +749,7 @@ invalid_input_of_calc_true_policy_value = [\n\"slate_id, reward, description\",\ninvalid_input_of_calc_true_policy_value,\n)\n-def test_calc_on_policy_value_using_invalid_input_data(\n+def test_calc_on_policy_policy_value_using_invalid_input_data(\nslate_id, reward, description\n) -> None:\n# set parameters\n@@ -766,7 +766,7 @@ def test_calc_on_policy_value_using_invalid_input_data(\nrandom_state=random_state,\n)\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = dataset.calc_on_policy_value(reward=reward, slate_id=slate_id)\n+ _ = dataset.calc_on_policy_policy_value(reward=reward, slate_id=slate_id)\n# slate_id, reward, description\n@@ -790,7 +790,7 @@ valid_input_of_calc_true_policy_value = [\n\"slate_id, reward, result, description\",\nvalid_input_of_calc_true_policy_value,\n)\n-def test_calc_on_policy_value_using_valid_input_data(\n+def test_calc_on_policy_policy_value_using_valid_input_data(\nslate_id, reward, result, description\n) -> None:\n# set parameters\n@@ -807,16 +807,18 @@ def test_calc_on_policy_value_using_valid_input_data(\nrandom_state=random_state,\nbehavior_policy_function=linear_behavior_policy_logit,\n)\n- assert result == dataset.calc_on_policy_value(reward=reward, slate_id=slate_id)\n+ assert result == dataset.calc_on_policy_policy_value(\n+ reward=reward, slate_id=slate_id\n+ )\n-# evaluation_policy_type, epsilon, context, action_2d, random_state, err, description\n+# evaluation_policy_type, epsilon, context, action, random_state, err, description\ninvalid_input_of_generate_evaluation_policy_pscore = [\n(\n\"awesome\", #\n1.0,\nnp.ones([5, 2]),\n- np.tile(np.arange(3), 5).reshape((5, 3)),\n+ np.tile(np.arange(3), 5),\n1.0,\nValueError,\n\"evaluation_policy_type must be\",\n@@ -825,7 +827,7 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n\"optimal\",\n1.0,\nnp.array([5, 2]), #\n- np.tile(np.arange(3), 5).reshape((5, 3)),\n+ np.tile(np.arange(3), 5),\n1.0,\nValueError,\n\"context must be 2-dimensional ndarray\",\n@@ -834,10 +836,10 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n\"optimal\",\n1.0,\nnp.ones([5, 2]),\n- np.ones(5), #\n+ np.ones([5, 2]), #\n1,\nValueError,\n- \"action_2d must be 2-dimensional ndarray\",\n+ \"action must be 1-dimensional ndarray\",\n),\n(\n\"optimal\",\n@@ -846,22 +848,22 @@ invalid_input_of_generate_evaluation_policy_pscore = [\nnp.random.choice(5), #\n1,\nValueError,\n- \"action_2d must be 2-dimensional ndarray\",\n+ \"action must be 1-dimensional ndarray\",\n),\n(\n\"optimal\",\n1.0,\nnp.ones([5, 2]),\n- np.ones([4, 2]), #\n+ np.ones(5), #\n1,\nValueError,\n- \"the size of axis 0\",\n+ \"action must be 1-dimensional ndarray, shape (n_rounds * len_list)\",\n),\n(\n\"optimal\",\n1.0,\nnp.ones([5, 2]),\n- np.ones([5, 2]), #\n+ np.ones(15), #\n1,\nValueError,\n\"actions of each slate must not be duplicated\",\n@@ -870,7 +872,7 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n\"optimal\",\n-1.0, #\nnp.ones([5, 2]),\n- np.ones([4, 2]),\n+ np.tile(np.arange(3), 5),\n1,\nValueError,\n\"\",\n@@ -879,14 +881,14 @@ invalid_input_of_generate_evaluation_policy_pscore = [\[email protected](\n- \"evaluation_policy_type, epsilon, context, action_2d, random_state, err, description\",\n+ \"evaluation_policy_type, epsilon, context, action, random_state, err, description\",\ninvalid_input_of_generate_evaluation_policy_pscore,\n)\ndef test_generate_evaluation_policy_pscore_using_invalid_input_data(\nevaluation_policy_type,\nepsilon,\ncontext,\n- action_2d,\n+ 
action,\nrandom_state,\nerr,\ndescription,\n@@ -911,7 +913,7 @@ def test_generate_evaluation_policy_pscore_using_invalid_input_data(\nevaluation_policy_type=evaluation_policy_type,\nepsilon=epsilon,\ncontext=context,\n- action_2d=action_2d,\n+ action=action,\nrandom_state=random_state,\n)\nelse:\n@@ -920,7 +922,7 @@ def test_generate_evaluation_policy_pscore_using_invalid_input_data(\nevaluation_policy_type=evaluation_policy_type,\nepsilon=epsilon,\ncontext=context,\n- action_2d=action_2d,\n+ action=action,\nrandom_state=random_state,\n)\n@@ -993,7 +995,7 @@ def test_generate_evaluation_policy_pscore_using_valid_input_data(\ncontext=bandit_feedback[\"context\"],\nrandom_state=random_state,\nepsilon=epsilon,\n- action_2d=bandit_feedback[\"action\"].reshape((n_rounds, len_list)),\n+ action=bandit_feedback[\"action\"],\n)\nif evaluation_policy_type == \"random\" or epsilon == 1.0:\n# pscores of random evaluation policy must be the same as those of bandit feedback using random behavior policy\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix action_2d to action; fix function name of calc_on_policy_value |
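After the renames recorded above, the on-policy helper is `calc_on_policy_policy_value`, and `generate_evaluation_policy_pscore` takes the flattened `action` array of shape `(n_rounds * len_list,)` in place of the earlier `action_2d`. A hedged end-to-end sketch of the corrected interface; the import paths and the choice of `logistic_reward_function` as `base_reward_function` follow the test module and are assumptions rather than part of the diff:

```python
import numpy as np
from obp.dataset import SyntheticSlateBanditDataset, logistic_reward_function

dataset = SyntheticSlateBanditDataset(
    n_unique_action=5, len_list=3, dim_context=2, reward_type="binary",
    base_reward_function=logistic_reward_function, random_state=12345,
)
feedback = dataset.obtain_batch_bandit_feedback(n_rounds=100, return_pscore_item_position=True)

# renamed: calc_on_policy_value -> calc_on_policy_policy_value
policy_value = dataset.calc_on_policy_policy_value(
    reward=feedback["reward"], slate_id=feedback["slate_id"]
)

# `action` replaces `action_2d` and is the flattened (n_rounds * len_list,) array
pscore, pscore_item_position, pscore_cascade = dataset.generate_evaluation_policy_pscore(
    evaluation_policy_type="optimal",
    context=feedback["context"],
    random_state=12345,
    epsilon=0.1,
    action=feedback["action"],
)
```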
641,011 | 17.05.2021 10:18:49 | -32,400 | 60ac0f6f558e752f70dff80c8fd40d9c64ee58dd | reflect comments on PR88 | [
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/README.md",
"new_path": "examples/quickstart/README.md",
"diff": "@@ -7,3 +7,4 @@ This page contains a list of quickstart notebooks written with the Open Bandit P\n- [`multiclass.ipynb`](./multiclass.ipynb): a quickstart guide to handle multi-class classification data as logged bandit feedback data for the standard off-policy learning, off-policy evaluation (OPE), and the evaluation of OPE procedures with the Open Bandit Pipeline.\n- [`online.ipynb`](./online.ipynb): a quickstart guide to implement off-policy evaluation (OPE) and the evaluation of OPE procedures for online bandit algorithms with the Open Bandit Pipeline.\n- [`opl.ipynb`](./opl.ipynb): a quickstart guide to implement off-policy learners and the evaluation of off-policy learners with the Open Bandit Pipeline.\n+- [`synthetic_slate.ipynb`](./synthetic_slate.ipynb): a quickstart guide to implement off-policy evaluation (OPE) and the evaluation of OPE procedures for the slate recommendation setting with the Open Bandit Pipeline.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -10,9 +10,9 @@ import numpy as np\nfrom ..utils import (\nestimate_confidence_interval_by_bootstrap,\n- check_sips_ope_inputs,\n- check_iips_ope_inputs,\n- check_rips_ope_inputs,\n+ check_sips_inputs,\n+ check_iips_inputs,\n+ check_rips_inputs,\n)\n@@ -38,7 +38,7 @@ class BaseSlateOffPolicyEstimator(metaclass=ABCMeta):\n@dataclass\nclass BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\n- \"\"\"Base Class of Slate Inverse Probability Weighting.\n+ \"\"\"Base Class of Slate Inverse Probability Weighting Estimators.\nlen_list: int (> 1)\nLength of a list of actions recommended in each impression.\n@@ -56,15 +56,15 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nevaluation_policy_pscore: np.ndarray,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"Estimate rewards for each round and slot.\n+ \"\"\"Estimate rewards given round (slate_id) and slot (position)).\nParameters\n----------\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\nbehavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -75,7 +75,7 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nReturns\n----------\nestimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW for each round and slot.\n+ Rewards estimated by IPW given round (slate_id) and slot (position)).\n\"\"\"\niw = evaluation_policy_pscore / behavior_policy_pscore\n@@ -95,10 +95,10 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nestimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW for each round and slot.\n+ Rewards estimated by IPW given round (slate_id) and slot (position)).\nalpha: float, default=0.05\nSignificant level of confidence intervals.\n@@ -135,7 +135,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nNote\n-------\n- Slate Standard Inverse Propensity Scoring (SIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e`.\n+ Slate Standard Inverse Propensity Scoring (SIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e` without any assumption about user behavior.\nParameters\n----------\n@@ -165,13 +165,13 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given 
logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\npscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -186,7 +186,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n- check_sips_ope_inputs(\n+ check_sips_inputs(\nslate_id=slate_id,\nreward=reward,\nposition=position,\n@@ -220,13 +220,13 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\npscore: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n@@ -250,7 +250,7 @@ class SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n- check_sips_ope_inputs(\n+ check_sips_inputs(\nslate_id=slate_id,\nreward=reward,\nposition=position,\n@@ -278,7 +278,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nNote\n-------\n- Slate Independent Inverse Propensity Scoring (IIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e`.\n+ Slate Independent Inverse Propensity Scoring (IIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e` assuming the item-position click model.\nParameters\n----------\n@@ -308,19 +308,19 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\npscore_item_position: array-like, shape (<= n_rounds * len_list,)\n- Marginal action choice probabilities of the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_{t, k}|x_t)`.\n+ Marginal probabilities that action :math:`a` is chosen at position (slot) :math:`k` by a behavior policy given context :math:`x`, i.e., :math:`\\\\pi_b(a_{t}(k) |x_t)`.\nevaluation_policy_pscore_item_position: array-like, shape (<= n_rounds * len_list,)\n- Marginal action choice probabilities of the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_{t, k}|x_t)`.\n+ Marginal probabilities that action :math:`a` is chosen at position (slot) :math:`k` by a evaluation policy given 
context :math:`x`, i.e., :math:`\\\\pi_e(a_{t}(k) |x_t)`.\nReturns\n----------\n@@ -328,7 +328,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nEstimated policy value (performance) of a given evaluation policy.\n\"\"\"\n- check_iips_ope_inputs(\n+ check_iips_inputs(\nslate_id=slate_id,\nreward=reward,\nposition=position,\n@@ -362,13 +362,13 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\npscore_item_position: array-like, shape (<= n_rounds * len_list,)\nMarginal action choice probabilities of the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_{t, k}|x_t)`.\n@@ -391,7 +391,7 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n- check_iips_ope_inputs(\n+ check_iips_inputs(\nslate_id=slate_id,\nreward=reward,\nposition=position,\n@@ -414,12 +414,12 @@ class SlateIndependentIPS(BaseSlateInverseProbabilityWeighting):\n@dataclass\n-class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\n- \"\"\"Estimate the policy value by Slate Recursive Inverse Propensity Scoring (RIPS).\n+class SlateRewardInteractionIPS(BaseSlateInverseProbabilityWeighting):\n+ \"\"\"Estimate the policy value by Slate Reward Interaction Inverse Propensity Scoring (RIPS).\nNote\n-------\n- Slate Recursive Inverse Propensity Scoring (RIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e`.\n+ Slate Reward Interaction Inverse Propensity Scoring (RIPS) estimates the policy value of a given evaluation policy :math:`\\\\pi_e` assuming the cascade click model.\nParameters\n----------\n@@ -449,19 +449,19 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\npscore_cascade: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities above the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n+ Action choice probabilities under the cascade behavior model by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t(k) | x_t, a_t(1), \\ldots, a_t(k-1))`.\nevaluation_policy_pscore_cascade: array-like, shape (<= n_rounds * len_list,)\n- Action choice 
probabilities above the slot (:math:`k`) by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n+ Action choice probabilities under the cascade behavior model by the evaluation policy (propensity scores), i.e., :math:`\\\\pi_e(a_t(k) | x_t, a_t(1), \\ldots, a_t(k-1))`.\nReturns\n----------\n@@ -470,7 +470,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\n\"\"\"\n- check_rips_ope_inputs(\n+ check_rips_inputs(\nslate_id=slate_id,\nreward=reward,\nposition=position,\n@@ -504,13 +504,13 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nParameters\n----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ IDs to differentiate slates (i.e., rounds or lists of actions).\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nposition: array-like, shape (<= n_rounds * len_list,)\n- Positions of each round and slot in the given logged bandit feedback.\n+ IDs to differentiate slot (i.e., position) in each slate.\npscore_cascade: array-like, shape (<= n_rounds * len_list,)\nAction choice probabilities above the slot (:math:`k`) by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(\\\\{a_{t, j}\\\\}_{j \\\\le k}|x_t)`.\n@@ -533,7 +533,7 @@ class SlateRecursiveIPS(BaseSlateInverseProbabilityWeighting):\nDictionary storing the estimated mean and upper-lower confidence bounds.\n\"\"\"\n- check_rips_ope_inputs(\n+ check_rips_inputs(\nslate_id=slate_id,\nreward=reward,\nposition=position,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta_slate.py",
"new_path": "obp/ope/meta_slate.py",
"diff": "@@ -26,7 +26,7 @@ class SlateOffPolicyEvaluation:\nParameters\n-----------\nbandit_feedback: BanditFeedback\n- Logged bandit feedback data used for off-policy evaluation.\n+ Logged bandit feedback data used for off-policy evaluation for the slate recommendation setting.\nope_estimators: List[BaseSlateOffPolicyEstimator]\nList of OPE estimators used to evaluate the policy value of evaluation policy.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -339,14 +339,14 @@ def check_ope_inputs(\nraise ValueError(\"pscore must be positive\")\n-def check_sips_ope_inputs(\n+def check_sips_inputs(\nslate_id: np.ndarray,\nreward: np.ndarray,\nposition: np.ndarray,\npscore: np.ndarray,\nevaluation_policy_pscore: np.ndarray,\n) -> Optional[ValueError]:\n- \"\"\"Check inputs for sips ope.\n+ \"\"\"Check inputs of SlateStandardIPS.\nParameters\n-----------\n@@ -393,8 +393,8 @@ def check_sips_ope_inputs(\nraise ValueError(\"evaluation_policy_pscore must be ndarray\")\nif evaluation_policy_pscore.ndim != 1:\nraise ValueError(\"evaluation_policy_pscore must be 1-dimensional\")\n- if np.any(evaluation_policy_pscore <= 0) or np.any(evaluation_policy_pscore > 1):\n- raise ValueError(\"evaluation_policy_pscore must be in the range of (0, 1]\")\n+ if np.any(evaluation_policy_pscore < 0) or np.any(evaluation_policy_pscore > 1):\n+ raise ValueError(\"evaluation_policy_pscore must be in the range of [0, 1]\")\n# slate id\nif not isinstance(slate_id, np.ndarray):\n@@ -437,14 +437,14 @@ def check_sips_ope_inputs(\nraise ValueError(\"evaluation_policy_pscore must be unique in each slate\")\n-def check_iips_ope_inputs(\n+def check_iips_inputs(\nslate_id: np.ndarray,\nreward: np.ndarray,\nposition: np.ndarray,\npscore_item_position: np.ndarray,\nevaluation_policy_pscore_item_position: np.ndarray,\n) -> Optional[ValueError]:\n- \"\"\"Check inputs for sips ope.\n+ \"\"\"Check inputs of SlateIndependentIPS.\nParameters\n-----------\n@@ -491,11 +491,11 @@ def check_iips_ope_inputs(\nraise ValueError(\"evaluation_policy_pscore_item_position must be ndarray\")\nif evaluation_policy_pscore_item_position.ndim != 1:\nraise ValueError(\"evaluation_policy_pscore_item_position must be 1-dimensional\")\n- if np.any(evaluation_policy_pscore_item_position <= 0) or np.any(\n+ if np.any(evaluation_policy_pscore_item_position < 0) or np.any(\nevaluation_policy_pscore_item_position > 1\n):\nraise ValueError(\n- \"evaluation_policy_pscore_item_position must be in the range of (0, 1]\"\n+ \"evaluation_policy_pscore_item_position must be in the range of [0, 1]\"\n)\n# slate id\n@@ -524,14 +524,14 @@ def check_iips_ope_inputs(\nraise ValueError(\"position must not be duplicated in each slate\")\n-def check_rips_ope_inputs(\n+def check_rips_inputs(\nslate_id: np.ndarray,\nreward: np.ndarray,\nposition: np.ndarray,\npscore_cascade: np.ndarray,\nevaluation_policy_pscore_cascade: np.ndarray,\n) -> Optional[ValueError]:\n- \"\"\"Check inputs for sips ope.\n+ \"\"\"Check inputs of SlateRewardInteractionIPS.\nParameters\n-----------\n@@ -578,11 +578,11 @@ def check_rips_ope_inputs(\nraise ValueError(\"evaluation_policy_pscore_cascade must be ndarray\")\nif evaluation_policy_pscore_cascade.ndim != 1:\nraise ValueError(\"evaluation_policy_pscore_cascade must be 1-dimensional\")\n- if np.any(evaluation_policy_pscore_cascade <= 0) or np.any(\n+ if np.any(evaluation_policy_pscore_cascade < 0) or np.any(\nevaluation_policy_pscore_cascade > 1\n):\nraise ValueError(\n- \"evaluation_policy_pscore_cascade must be in the range of (0, 1]\"\n+ \"evaluation_policy_pscore_cascade must be in the range of [0, 1]\"\n)\n# slate id\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta_slate.py",
"new_path": "tests/ope/test_meta_slate.py",
"diff": "@@ -14,7 +14,7 @@ from obp.ope import (\nSlateOffPolicyEvaluation,\nSlateStandardIPS,\nSlateIndependentIPS,\n- SlateRecursiveIPS,\n+ SlateRewardInteractionIPS,\n)\nfrom obp.utils import check_confidence_interval_arguments\n@@ -133,7 +133,7 @@ class SlateIndependentIPSMock(SlateIndependentIPS):\n@dataclass\n-class SlateRecursiveIPSMock(SlateRecursiveIPS):\n+class SlateRewardInteractionIPSMock(SlateRewardInteractionIPS):\n\"\"\"Slate Recursive Inverse Propensity Scoring (RIPS) Mock.\"\"\"\nestimator_name: str = \"rips\"\n@@ -189,7 +189,7 @@ sips = SlateStandardIPSMock(len_list=3)\nsips2 = SlateStandardIPSMock(len_list=3, eps=0.02)\nsips3 = SlateStandardIPSMock(len_list=3, estimator_name=\"sips3\")\niips = SlateIndependentIPSMock(len_list=3)\n-rips = SlateRecursiveIPSMock(len_list=3)\n+rips = SlateRewardInteractionIPSMock(len_list=3)\ndef test_meta_post_init(synthetic_slate_bandit_feedback: BanditFeedback) -> None:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | reflect comments on PR88 |
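The commit record above renames the slate estimators (SlateRecursiveIPS becomes SlateRewardInteractionIPS) and the input validators (check_*_ope_inputs become check_*_inputs). The sketch below shows one plausible way to call the standard slate estimator after this rename; the two toy slates, all propensity values, and the printed result are made up for illustration and assume a version of obp that already ships these slate estimators.

```python
# Toy sketch: two logged slates of length 3; every number below is invented.
import numpy as np
from obp.ope import SlateStandardIPS

len_list = 3
slate_id = np.repeat([0, 1], len_list)      # which slate each row belongs to
position = np.tile(np.arange(len_list), 2)  # slot index within each slate
reward = np.array([1, 0, 0, 0, 1, 0])       # click indicator per slot

# slate-level propensities (constant within a slate, as check_sips_inputs requires)
pscore = np.repeat([0.2, 0.1], len_list)                     # behavior policy
evaluation_policy_pscore = np.repeat([0.3, 0.05], len_list)  # evaluation policy

sips = SlateStandardIPS(len_list=len_list)
estimated_value = sips.estimate_policy_value(
    slate_id=slate_id,
    reward=reward,
    position=position,
    pscore=pscore,
    evaluation_policy_pscore=evaluation_policy_pscore,
)
print(estimated_value)
```

SlateIndependentIPS and SlateRewardInteractionIPS follow the same calling pattern but expect pscore_item_position and pscore_cascade, respectively, instead of the joint pscore.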
641,014 | 17.05.2021 16:12:07 | -32,400 | 5c180f177e343f362e861a98a50bc2fb5adfc398 | generalize click models and fix minor errors | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "from dataclasses import dataclass\nfrom typing import Optional, Callable, Tuple, Union, List\nfrom itertools import permutations\n-from math import factorial\nimport numpy as np\nfrom scipy.stats import truncnorm\n+from scipy.special import perm\nfrom sklearn.utils import check_random_state, check_scalar\nfrom tqdm import tqdm\n@@ -62,11 +62,19 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nclick_model: str, default=None\nType of click model, which must be one of None, 'pbm', or 'cascade'.\n- When None is given, reward of each slot is sampled based on the expected reward of the slot.\n- When 'pbm' is given, reward of each slot is sampled based on the position-based model.\n- When 'cascade' is given, reward of each slot is sampled based on the cascade model.\n+ When None is given, reward at each slot is sampled based on the original expected rewards.\n+ When 'pbm' is given, reward at each slot is sampled based on the position-based model.\n+ When 'cascade' is given, reward at each slot is sampled based on the cascade model.\nWhen using some click model, 'continuous' reward type is unavailable.\n+ eta: float, default=1.0\n+ A hyperparameter to define the click models.\n+ When click_model='pbm', then eta defines examination probabilities of the position-based model.\n+ For example, when eta=0.5, then the examination probability at position `k` is :math:`\\\\theta (k) = (1/k)^{0.5}`.\n+ When click_model='cascade', then eta defines the position-dependent attractiveness parameters of the dependent click model\n+ (an extension of the cascade model).\n+ For example, when eta=0.5, the position-dependent attractiveness parameters at position `k` is :math:`\\\\alpha (k) = (1/k)^{0.5}`.\n+\nbase_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\nFunction generating expected reward for each given action-context pair,\ni.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n@@ -154,6 +162,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreward_type: str = \"binary\"\nreward_structure: str = \"cascade_additive\"\nclick_model: Optional[str] = None\n+ eta: float = 1.0\nbase_reward_function: Optional[\nCallable[\n[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n@@ -208,14 +217,21 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nf\"click_model must be one of 'cascade', 'pbm', or None, but {self.click_model} is given.\"\n)\n# set exam_weight (slot-level examination probability).\n- # When click_model is 'pbm', exam_weight is :math:`1 / k`, where :math:`k` is the position.\n+ # When click_model is 'pbm', exam_weight is :math:`(1 / k)^{\\\\eta}`, where :math:`k` is the position.\nif self.click_model == \"pbm\":\n- self.exam_weight = 1.0 / np.arange(1, self.len_list + 1)\n+ check_scalar(self.eta, name=\"eta\", target_type=float, min_val=0.0)\n+ self.exam_weight = (1.0 / np.arange(1, self.len_list + 1)) ** self.eta\n+ self.attractiveness = np.ones(self.len_list, dtype=float)\n+ elif self.click_model == \"cascade\":\n+ check_scalar(self.eta, name=\"eta\", target_type=float, min_val=0.0)\n+ self.attractiveness = (1.0 / np.arange(1, self.len_list + 1)) ** self.eta\n+ self.exam_weight = np.ones(self.len_list, dtype=float)\nelse:\n+ self.attractiveness = np.ones(self.len_list, dtype=float)\nself.exam_weight = np.ones(self.len_list, dtype=float)\nif self.click_model is not None and self.reward_type == \"continuous\":\nraise ValueError(\n- \"continuous reward type is unavailable when click 
model is given\"\n+ \"continuous outcome cannot be generated when click_model is given\"\n)\nif self.reward_structure in [\"cascade_additive\", \"standard_additive\"]:\n# generate additive action interaction weight matrix of (n_unique_action, n_unique_action)\n@@ -303,12 +319,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nn_rounds: int,\nreturn_pscore_item_position: bool = True,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n- \"\"\"Sample action and obtain pscores.\n+ \"\"\"Sample action and obtain the three variants of the propensity scores.\nParameters\n------------\n- behavior_policy_logit_: array-like, shape (n_rounds, n_actiions)\n- Logit given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n+ behavior_policy_logit_: array-like, shape (n_rounds, n_actions)\n+ Logit values given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\nn_rounds: int\nNumber of rounds for synthetic bandit feedback data.\n@@ -417,12 +433,21 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n\"\"\"\nexpected_reward_factual *= self.exam_weight\nif self.reward_type == \"binary\":\n- reward = np.array(\n- [\n- self.random_.binomial(n=1, p=expected_reward_factual[:, position_])\n- for position_ in np.arange(self.len_list)\n- ]\n- ).T\n+ sampled_reward_list = list()\n+ sampled_rewards_at_position = np.ones(expected_reward_factual.shape[0])\n+ for position_ in np.arange(self.len_list):\n+ discount_factors = sampled_rewards_at_position * self.attractiveness[\n+ position_\n+ ] + (1 - sampled_rewards_at_position)\n+ expected_reward_factual_at_position = (\n+ discount_factors * expected_reward_factual[:, position_]\n+ )\n+ sampled_rewards_at_position = self.random_.binomial(\n+ n=1, p=expected_reward_factual_at_position\n+ )\n+ sampled_reward_list.append(sampled_rewards_at_position)\n+ reward = np.array(sampled_reward_list).T\n+\nelif self.reward_type == \"continuous\":\nreward = np.zeros(expected_reward_factual.shape)\nfor position_ in np.arange(self.len_list):\n@@ -438,14 +463,6 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nelse:\nraise NotImplementedError\n- if self.click_model == \"cascade\":\n- argmax_first_slot = np.argmax(reward, axis=1)\n- for i, j in tqdm(\n- enumerate(argmax_first_slot),\n- desc=\"[sample_reward_of_cascade_model]\",\n- total=reward.shape[0],\n- ):\n- reward[i, j + 1 :] = 0\n# return: array-like, shape (n_rounds, len_list)\nreturn reward\n@@ -499,7 +516,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nand behavior_policy_logit_.shape == (n_rounds, self.n_unique_action)\n):\nraise ValueError(\"behavior_policy_logit_ has an invalid shape\")\n- # sample actions and calculate pscores\n+ # sample actions and calculate the three variants of the propensity scores\n(\naction,\npscore_cascade,\n@@ -571,15 +588,15 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nParameters\n-----------\nreward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t, k}`.\n+ Reward observed in each round and slot of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\nslate_id: array-like, shape (<= n_rounds * len_list,)\n- Slate id observed in each round of the logged bandit feedback.\n+ Slate ids of the logged bandit feedback.\nReturns\n----------\npolicy_value: float\n- The policy value of the given reward and slate_id.\n+ The on-policy policy value 
estimate of the behavior policy.\n\"\"\"\nif not isinstance(reward, np.ndarray):\n@@ -601,38 +618,31 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself,\nevaluation_policy_type: str,\ncontext: np.ndarray,\n- random_state: int,\n- epsilon: Optional[float] = 1.0,\naction: Optional[np.ndarray] = None,\n- slate_id: Optional[np.ndarray] = None,\n- position: Optional[np.ndarray] = None,\n+ epsilon: Optional[float] = 1.0,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n- \"\"\"Generate pscores of three types of evaluation policies ('random', 'optimal', 'anti-optimal').\n+ \"\"\"Generate the three variants of the propensity scores of synthetic evaluation policies (such as 'random', 'optimal', 'anti-optimal').\nParameters\n-----------\nevaluation_policy_type: str\nType of evaluation policy, which must be one of 'optimal', 'anti-optimal', or 'random'.\n- When 'optimal' is given, we sort actions by their base expected rewards (outputs of `base_reward_function`) and extract top-L actions (L=`len_list`) for each slate.\n- When 'anti-optimal' is given, we sort actions by their base expected rewards (outputs of `base_reward_function`) and extract bottom-L actions (L=`len_list`) for each slate.\n- We calculate three propensity scores of the epsilon-greedy policy.\n- When 'random' is given, we calculate three propensity scores of the random policy.\n+ When 'optimal' is given, we sort actions based on the base expected rewards (outputs of `base_reward_function`) and extract top-L actions (L=`len_list`) for each slate.\n+ When 'anti-optimal' is given, we sort actions based on the base expected rewards (outputs of `base_reward_function`) and extract bottom-L actions (L=`len_list`) for each slate.\n+ We calculate the three variants of the propensity scores (pscore, pscore_item_position, and pscore_cascade) of the epsilon-greedy policy when either 'optimal' or 'anti-optimal' is given.\n+ When 'random' is given, we calculate the three variants of the propensity scores of the uniform random policy.\ncontext: array-like, shape (n_rounds, dim_context)\nContext vectors characterizing each round (such as user information).\n- random_state: int\n- Controls the random seed in sampling synthetic slate bandit dataset.\n-\n- epsilon: float, default=1.\n- Exploration hyperparameter that must take value in the range of [0., 1.].\n- When evaluation_policy_type is 'random', this argument is unnecessary.\n-\naction: array-like, shape (n_rounds * len_list,), default=None\nActions sampled by a behavior policy.\nAction list of slate `i` is stored in action[`i` * `len_list`: (`i + 1`) * `len_list`].\n- When evaluation_policy_type is 'random', this argument is unnecessary.\n+ When evaluation_policy_type is 'random', this is unnecessary.\n+ epsilon: float, default=1.\n+ Exploration hyperparameter that must take value in the range of [0., 1.].\n+ When evaluation_policy_type is 'random', this is unnecessary.\nReturns\n----------\n@@ -657,26 +667,32 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\"context must be 2-dimensional ndarray\")\n# [Caution]: OverflowError raises when integer division result is too large for a float\n- cascade_npr = [\n- factorial(self.n_unique_action) / factorial(self.n_unique_action - x - 1)\n- for x in np.arange(self.len_list)\n- ]\n- random_pscore = np.ones(context.shape[0] * self.len_list) / cascade_npr[-1]\n+ random_pscore_cascade = (\n+ 1.0\n+ / np.tile(\n+ np.arange(\n+ self.n_unique_action, self.n_unique_action - self.len_list, -1\n+ ),\n+ (context.shape[0], 
1),\n+ )\n+ .cumprod(axis=1)\n+ .flatten()\n+ )\n+ random_pscore = np.ones(context.shape[0] * self.len_list) / perm(\n+ self.n_unique_action, self.len_list\n+ )\nrandom_pscore_item_position = (\nnp.ones(context.shape[0] * self.len_list) / self.n_unique_action\n)\n- random_pscore_cascade = 1.0 / np.tile(cascade_npr, context.shape[0])\nif evaluation_policy_type == \"random\":\n- pscore = random_pscore\n- pscore_item_position = random_pscore_item_position\n- pscore_cascade = random_pscore_cascade\n+ return random_pscore, random_pscore_item_position, random_pscore_cascade\nelse:\n# base_expected_reward: array-like, shape (n_rounds, n_unique_action)\nbase_expected_reward = self.base_reward_function(\ncontext=context,\naction_context=self.action_context,\n- random_state=random_state,\n+ random_state=self.random_state,\n)\nif (\nnot isinstance(action, np.ndarray)\n@@ -691,31 +707,17 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\n\"the size of axis 0 of context must be the same as that of action_2d\"\n)\n- if set([np.unique(x).shape[0] for x in action_2d]) != set([self.len_list]):\n- raise ValueError(\"actions of each slate must not be duplicated\")\ncheck_scalar(\nepsilon, name=\"epsilon\", target_type=(float), min_val=0.0, max_val=1.0\n)\nif evaluation_policy_type == \"optimal\":\nsorted_actions = base_expected_reward.argsort(axis=1)[\n- :, -self.len_list :\n+ :, : self.len_list\n]\n- (\n- pscore,\n- pscore_item_position,\n- pscore_cascade,\n- ) = self._calc_epsilon_greedy_pscore(\n- epsilon=epsilon,\n- action_2d=action_2d,\n- sorted_actions=sorted_actions,\n- random_pscore=random_pscore,\n- random_pscore_item_position=random_pscore_item_position,\n- random_pscore_cascade=random_pscore_cascade,\n- )\nelse:\nsorted_actions = base_expected_reward.argsort(axis=1)[\n- :, : self.len_list\n+ :, -self.len_list :\n]\n(\npscore,\n@@ -740,7 +742,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nrandom_pscore_item_position: np.ndarray,\nrandom_pscore_cascade: np.ndarray,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n- \"\"\"Calculate pscores given action_2d, sorted_actions, and random pscores.\n+ \"\"\"Calculate the three variants of the propensity scores of synthetic evaluation policies based on the epsilon-greedy rule.\nParameters\n-----------\n@@ -786,11 +788,11 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\"action_2d must be 2-dimensional ndarray\")\nif set([np.unique(x).shape[0] for x in action_2d]) != set([self.len_list]):\nraise ValueError(\"actions of each slate must not be duplicated\")\n- match_action_flg = sorted_actions == action_2d\n- pscore_flg = np.repeat(match_action_flg.all(axis=1), self.len_list)\n- pscore_item_position_flg = match_action_flg.flatten()\n- pscore_cascade_flg = match_action_flg.cumprod(axis=1).flatten()\n- # calculate pscores\n+ action_match_flag = sorted_actions == action_2d\n+ pscore_flg = np.repeat(action_match_flag.all(axis=1), self.len_list)\n+ pscore_item_position_flg = action_match_flag.flatten()\n+ pscore_cascade_flg = action_match_flag.cumprod(axis=1).flatten()\n+ # calculate the three variants of the propensity scores based on the given epsilon value\npscore = pscore_flg * (1 - epsilon) + epsilon * random_pscore\npscore_item_position = (\npscore_item_position_flg * (1 - epsilon)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | generalize click models and fix minor errors |
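The commit record above generalizes the click models: eta now controls both the examination probabilities of the position-based model and the position-dependent attractiveness of the cascade (dependent-click) model. Below is a minimal sketch of generating slate feedback under the position-based click model; the sizes, eta=0.5, and the imported behavior-policy and reward functions are illustrative choices rather than values taken from the repository's own examples.

```python
# Illustrative sketch: synthetic slate data under the position-based click model.
from obp.dataset import SyntheticSlateBanditDataset, linear_behavior_policy_logit
from obp.dataset.synthetic import logistic_reward_function

dataset = SyntheticSlateBanditDataset(
    n_unique_action=10,
    len_list=3,
    dim_context=2,
    reward_type="binary",
    reward_structure="cascade_additive",
    click_model="pbm",  # examination probability at position k is (1 / k) ** eta
    eta=0.5,
    behavior_policy_function=linear_behavior_policy_logit,
    base_reward_function=logistic_reward_function,
    random_state=12345,
)
# pscore_item_position is costly to compute, so it is skipped in this sketch
bandit_feedback = dataset.obtain_batch_bandit_feedback(
    n_rounds=1000, return_pscore_item_position=False
)
print(bandit_feedback["reward"].mean())  # average click rate over all slots
```

Switching click_model to "cascade" (or back to None) keeps the rest of the call unchanged.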
641,014 | 17.05.2021 16:12:30 | -32,400 | cd1ba090406f211053f05ccd59f65aaf632a0fe7 | add some tests about click models | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -13,7 +13,7 @@ from obp.dataset import (\nfrom obp.types import BanditFeedback\n-# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description\n+# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, eta, random_state, err, description\ninvalid_input_of_init = [\n(\n\"4\",\n@@ -22,7 +22,9 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"n_unique_action must be an integer larger than 1\",\n),\n(\n@@ -32,7 +34,9 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"n_unique_action must be an integer larger than 1\",\n),\n(\n@@ -42,7 +46,9 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"len_list must be an integer such that\",\n),\n(\n@@ -52,7 +58,9 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"len_list must be an integer such that\",\n),\n(\n@@ -62,7 +70,9 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"len_list must be an integer such that\",\n),\n(\n@@ -72,7 +82,9 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"dim_context must be a positive integer\",\n),\n(\n@@ -82,19 +94,100 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"pbm\",\n+ 1.0,\n1,\n+ ValueError,\n\"dim_context must be a positive integer\",\n),\n- (5, 3, 2, \"aaa\", \"independent\", \"pbm\", 1, \"reward_type must be either\"),\n- (5, 3, 2, \"binary\", \"aaa\", \"pbm\", 1, \"reward_structure must be one of\"),\n- (5, 3, 2, \"binary\", \"independent\", \"aaa\", 1, \"click_model must be one of\"),\n- (5, 3, 2, \"binary\", \"independent\", \"pbm\", \"x\", \"random_state must be an integer\"),\n- (5, 3, 2, \"binary\", \"independent\", \"pbm\", None, \"random_state must be an integer\"),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"aaa\",\n+ \"independent\",\n+ \"pbm\",\n+ 1.0,\n+ 1,\n+ ValueError,\n+ \"reward_type must be either\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"aaa\",\n+ \"pbm\",\n+ 1.0,\n+ 1,\n+ ValueError,\n+ \"reward_structure must be one of\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"aaa\",\n+ 1.0,\n+ 1,\n+ ValueError,\n+ \"click_model must be one of\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ \"aaa\",\n+ 1,\n+ TypeError,\n+ \"`eta` must be an instance of <class 'float'>, not <class 'str'>.\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ -1.0,\n+ 1,\n+ ValueError,\n+ \"`eta`= -1.0, must be >= 0.0.\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1.0,\n+ \"x\",\n+ ValueError,\n+ \"random_state must be an integer\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"pbm\",\n+ 1.0,\n+ None,\n+ ValueError,\n+ \"random_state must be an integer\",\n+ ),\n]\[email protected](\n- \"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, eta, random_state, err, description\",\ninvalid_input_of_init,\n)\ndef test_synthetic_slate_init_using_invalid_inputs(\n@@ -104,10 +197,12 @@ def test_synthetic_slate_init_using_invalid_inputs(\nreward_type,\nreward_structure,\nclick_model,\n+ eta,\nrandom_state,\n+ err,\ndescription,\n):\n- with 
pytest.raises(ValueError, match=f\"{description}*\"):\n+ with pytest.raises(err, match=f\"{description}*\"):\n_ = SyntheticSlateBanditDataset(\nn_unique_action=n_unique_action,\nlen_list=len_list,\n@@ -115,6 +210,7 @@ def test_synthetic_slate_init_using_invalid_inputs(\nreward_type=reward_type,\nreward_structure=reward_structure,\nclick_model=click_model,\n+ eta=eta,\nrandom_state=random_state,\n)\n@@ -359,7 +455,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nassert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n-# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description\n+# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, eta, behavior_policy_function, reward_function, return_pscore_item_position, description\nvalid_input_of_obtain_batch_bandit_feedback = [\n(\n10,\n@@ -370,6 +466,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\nNone,\n+ 1.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -384,6 +481,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\nNone,\n+ 1.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -398,6 +496,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\nNone,\n+ 1.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -412,6 +511,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\nNone,\n+ 1.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -426,6 +526,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\nNone,\n+ 1.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -440,6 +541,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\nNone,\n+ 1.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -454,6 +556,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\nNone,\n+ 0.0,\nNone,\nNone,\nFalse,\n@@ -468,6 +571,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\nNone,\n+ 0.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -482,6 +586,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\nNone,\n+ 0.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -496,6 +601,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\nNone,\n+ 0.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -510,6 +616,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\nNone,\n+ 0.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n@@ -524,6 +631,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\n\"cascade\",\n+ 0.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -538,6 +646,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\n\"cascade\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -552,6 +661,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\n\"cascade\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -566,6 +676,7 @@ 
valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\n\"cascade\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -580,6 +691,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\n\"cascade\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -594,6 +706,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_additive\",\n\"pbm\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -608,6 +721,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"cascade_exponential\",\n\"pbm\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -622,6 +736,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_additive\",\n\"pbm\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -636,6 +751,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"standard_exponential\",\n\"pbm\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -650,6 +766,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n1000,\n\"independent\",\n\"pbm\",\n+ 0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n@@ -659,7 +776,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\[email protected](\n- \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, behavior_policy_function, reward_function, return_pscore_item_position, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, eta, behavior_policy_function, reward_function, return_pscore_item_position, description\",\nvalid_input_of_obtain_batch_bandit_feedback,\n)\ndef test_synthetic_slate_using_valid_inputs(\n@@ -671,6 +788,7 @@ def test_synthetic_slate_using_valid_inputs(\nn_rounds,\nreward_structure,\nclick_model,\n+ eta,\nbehavior_policy_function,\nreward_function,\nreturn_pscore_item_position,\n@@ -683,6 +801,7 @@ def test_synthetic_slate_using_valid_inputs(\nreward_type=reward_type,\nreward_structure=reward_structure,\nclick_model=click_model,\n+ eta=eta,\nrandom_state=random_state,\nbehavior_policy_function=behavior_policy_function,\nbase_reward_function=reward_function,\n@@ -812,14 +931,13 @@ def test_calc_on_policy_policy_value_using_valid_input_data(\n)\n-# evaluation_policy_type, epsilon, context, action, random_state, err, description\n+# evaluation_policy_type, epsilon, context, action, err, description\ninvalid_input_of_generate_evaluation_policy_pscore = [\n(\n\"awesome\", #\n1.0,\nnp.ones([5, 2]),\nnp.tile(np.arange(3), 5),\n- 1.0,\nValueError,\n\"evaluation_policy_type must be\",\n),\n@@ -828,7 +946,6 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n1.0,\nnp.array([5, 2]), #\nnp.tile(np.arange(3), 5),\n- 1.0,\nValueError,\n\"context must be 2-dimensional ndarray\",\n),\n@@ -837,7 +954,6 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n1.0,\nnp.ones([5, 2]),\nnp.ones([5, 2]), #\n- 1,\nValueError,\n\"action must be 1-dimensional ndarray\",\n),\n@@ -846,7 +962,6 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n1.0,\nnp.ones([5, 2]),\nnp.random.choice(5), #\n- 1,\nValueError,\n\"action must be 1-dimensional ndarray\",\n),\n@@ -855,33 +970,38 @@ invalid_input_of_generate_evaluation_policy_pscore = [\n1.0,\nnp.ones([5, 2]),\nnp.ones(5), #\n- 1,\nValueError,\n\"action must be 1-dimensional ndarray, shape (n_rounds * 
len_list)\",\n),\n(\n\"optimal\",\n- 1.0,\n+ \"aaa\", #\nnp.ones([5, 2]),\n- np.ones(15), #\n- 1,\n- ValueError,\n- \"actions of each slate must not be duplicated\",\n+ np.tile(np.arange(3), 5),\n+ TypeError,\n+ \"`epsilon` must be an instance of <class 'float'>, not <class 'str'>.\",\n),\n(\n\"optimal\",\n-1.0, #\nnp.ones([5, 2]),\nnp.tile(np.arange(3), 5),\n- 1,\nValueError,\n- \"\",\n+ \"`epsilon`= -1.0, must be >= 0.0.\",\n+ ),\n+ (\n+ \"optimal\",\n+ 2.0, #\n+ np.ones([5, 2]),\n+ np.tile(np.arange(3), 5),\n+ ValueError,\n+ \"`epsilon`= 2.0, must be <= 1.0.\",\n),\n]\[email protected](\n- \"evaluation_policy_type, epsilon, context, action, random_state, err, description\",\n+ \"evaluation_policy_type, epsilon, context, action, err, description\",\ninvalid_input_of_generate_evaluation_policy_pscore,\n)\ndef test_generate_evaluation_policy_pscore_using_invalid_input_data(\n@@ -889,7 +1009,6 @@ def test_generate_evaluation_policy_pscore_using_invalid_input_data(\nepsilon,\ncontext,\naction,\n- random_state,\nerr,\ndescription,\n) -> None:\n@@ -907,23 +1026,12 @@ def test_generate_evaluation_policy_pscore_using_invalid_input_data(\nrandom_state=random_state,\nbase_reward_function=logistic_reward_function,\n)\n- if description == \"\":\n- with pytest.raises(err):\n- _ = dataset.generate_evaluation_policy_pscore(\n- evaluation_policy_type=evaluation_policy_type,\n- epsilon=epsilon,\n- context=context,\n- action=action,\n- random_state=random_state,\n- )\n- else:\nwith pytest.raises(err, match=f\"{description}*\"):\n_ = dataset.generate_evaluation_policy_pscore(\nevaluation_policy_type=evaluation_policy_type,\nepsilon=epsilon,\ncontext=context,\naction=action,\n- random_state=random_state,\n)\n@@ -993,7 +1101,6 @@ def test_generate_evaluation_policy_pscore_using_valid_input_data(\n) = dataset.generate_evaluation_policy_pscore(\nevaluation_policy_type=evaluation_policy_type,\ncontext=bandit_feedback[\"context\"],\n- random_state=random_state,\nepsilon=epsilon,\naction=bandit_feedback[\"action\"],\n)\n@@ -1009,6 +1116,45 @@ def test_generate_evaluation_policy_pscore_using_valid_input_data(\nassert len(set(np.unique(pscore)) - set([0.0, 1.0])) == 0\nassert len(set(np.unique(pscore_item_position)) - set([0.0, 1.0])) == 0\nassert len(set(np.unique(pscore_cascade)) - set([0.0, 1.0])) == 0\n+ # check pscores\n+ assert (\n+ pscore_cascade < pscore\n+ ).sum() == 0, \"pscore must be smaller than or equal to pscore_cascade\"\n+ assert (\n+ pscore_item_position < pscore\n+ ).sum() == 0, \"pscore must be smaller than or equal to pscore_item_position\"\n+ assert (\n+ pscore_item_position < pscore_cascade\n+ ).sum() == 0, \"pscore_cascade must be smaller than or equal to pscore_item_position\"\n+\n+ # check slate bandit feedback (common test)\n+ check_slate_bandit_feedback(bandit_feedback=bandit_feedback)\n+ bandit_feedback_df = pd.DataFrame()\n+ for column in [\"slate_id\", \"position\", \"action\"]:\n+ bandit_feedback_df[column] = bandit_feedback[column]\n+ bandit_feedback_df[\"pscore\"] = pscore\n+ bandit_feedback_df[\"pscore_cascade\"] = pscore_cascade\n+ bandit_feedback_df[\"pscore_item_position\"] = pscore_item_position\n+\n+ previous_minimum_pscore_cascade = (\n+ bandit_feedback_df.groupby(\"slate_id\")[\"pscore_cascade\"]\n+ .expanding()\n+ .min()\n+ .values\n+ )\n+ assert (\n+ previous_minimum_pscore_cascade < pscore_cascade\n+ ).sum() == 0, \"pscore_cascade must be non-decresing sequence in each slate\"\n+ count_pscore_in_expression = bandit_feedback_df.groupby(\"slate_id\").apply(\n+ lambda x: 
x[\"pscore\"].unique().shape[0]\n+ )\n+ assert (\n+ count_pscore_in_expression != 1\n+ ).sum() == 0, \"pscore must be unique in each slate\"\n+ last_slot_feedback_df = bandit_feedback_df.drop_duplicates(\"slate_id\", keep=\"last\")\n+ assert (\n+ last_slot_feedback_df[\"pscore\"] != last_slot_feedback_df[\"pscore_cascade\"]\n+ ).sum() == 0, \"pscore must be the same as pscore_cascade in the last slot\"\n# n_unique_action, len_list, epsilon, action_2d, sorted_actions, random_pscore, random_pscore_item_position, random_pscore_cascade, true_pscore, true_pscore_item_position, true_pscore_cascade, description\n@@ -1041,7 +1187,7 @@ valid_input_of_calc_epsilon_greedy_pscore = [\n[0.1 / 5, 0.1 / 20, 0.1 / 60],\n]\n).flatten(),\n- \"epsolon is 0.1\",\n+ \"epsilon is 0.1\",\n),\n]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some tests about click models |
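The new tests above assert ordering and monotonicity relations among the three propensity-score variants returned with the slate feedback. The helper below is a compact standalone restatement of those invariants; it is not part of obp and simply assumes a bandit_feedback dict containing the keys used in the tests, with rows ordered by position within each slate.

```python
# Standalone sketch of the propensity-score invariants the new tests check.
import pandas as pd

def check_slate_pscore_invariants(bandit_feedback: dict, tol: float = 1e-12) -> None:
    """Raise AssertionError if the three propensity-score variants are inconsistent."""
    df = pd.DataFrame(
        {
            "slate_id": bandit_feedback["slate_id"],
            "pscore": bandit_feedback["pscore"],
            "pscore_cascade": bandit_feedback["pscore_cascade"],
            "pscore_item_position": bandit_feedback["pscore_item_position"],
        }
    )
    # joint pscore <= cascade (prefix) pscore <= marginal item-position pscore
    assert (df["pscore"] <= df["pscore_cascade"] + tol).all()
    assert (df["pscore_cascade"] <= df["pscore_item_position"] + tol).all()
    # the joint pscore is a slate-level quantity, so it must be constant within a slate
    assert (df.groupby("slate_id")["pscore"].nunique() == 1).all()
    # the prefix pscore can only shrink (or stay equal) as the position advances
    assert (df.groupby("slate_id")["pscore_cascade"].diff().fillna(0.0) <= tol).all()
```

It can be run on the dict returned by obtain_batch_bandit_feedback(..., return_pscore_item_position=True).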
641,014 | 17.05.2021 16:27:17 | -32,400 | 71e644fb42b96e8be6ac81f6f353fee20a912a24 | fix eng in synthetic_slate.ipynb | [
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/synthetic_slate.ipynb",
"new_path": "examples/quickstart/synthetic_slate.ipynb",
"diff": "\"---\\n\",\n\"This notebook provides an example of conducting OPE of several different evaluation policies with synthetic slate bandit feedback data.\\n\",\n\"\\n\",\n- \"Our example with synthetic bandit data contains the following four major steps:\\n\",\n+ \"Our example with synthetic bandit data contains the follwoing four major steps:\\n\",\n\"- (1) Synthetic Slate Data Generation\\n\",\n- \"- (2) Evaluation Policy Definition (Random Policy)\\n\",\n+ \"- (2) Defining Evaluation Policy\\n\",\n\"- (3) Off-Policy Evaluation\\n\",\n\"- (4) Evaluation of OPE Estimators\\n\",\n\"\\n\",\n- \"The second step could be replaced by some Off-Policy Learning (OPL) step, but obp still does not implement any OPL module for slate bandit data.\\n\",\n+ \"The second step could be replaced by some Off-Policy Learning (OPL) step, but obp still does not implement any OPL module for slate bandit data. Implementing OPL for slate bandit data is our future work.\\n\",\n\"\\n\",\n- \"Please see [../examples/synthetic](../synthetic) for a more sophisticated example of the evaluation of OPE with synthetic slate bandit data.\"\n+ \"Please see [../examples/synthetic_slate](../synthetic_slate) for a more sophisticated example of the evaluation of OPE with synthetic slate bandit data.\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"We can observe that the variance of three estimators is as follows: `sips > rips > iips`.\"\n+ \"We can observe that the variance of the three estimators is as follows: `sips > rips > iips`.\"\n]\n},\n{\n\"## (4) Evaluation of OPE estimators\\n\",\n\"Our final step is **the evaluation of OPE**, which evaluates and compares the estimation accuracy of OPE estimators.\\n\",\n\"\\n\",\n- \"We obtain a policy value by generating synthetic dataset with the policies.\"\n+ \"First, we can obtain the policy value of the evaluation policy as follows.\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Please see [../examples/synthetic](../synthetic) for a more sophisticated example of the evaluation of OPE with synthetic slate bandit data.\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"## (5) Advanced Evaluation of OPE Estimators using various types of reward assumptions\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"The performance of slate OPE estimators could depend on the reward assumptions.\\n\",\n- \"1. With cascading reward structure, RIPS should perform best.\\n\",\n- \"2. With independent reward structure, IIPS should perform best.\\n\",\n- \"3. With standard reward structure, SIPS should perform best.\"\n+ \"## (5) Advanced Evaluation of OPE Estimators using various types of reward assumptions\\n\",\n+ \"\\n\",\n+ \"The performance of the OPE estimators is expected to depend on the reward assumptions as follows.\\n\",\n+ \"\\n\",\n+ \"1. When reward structure is cascade, RIPS should be the most accurate estimator of the three.\\n\",\n+ \"2. 
When reward structure is independent, IIPS should be the most accurate estimator of the three.\\n\",\n+ \"\\n\",\n+ \"The first hypothesis was observed in the previous section.\"\n]\n},\n{\n\"source\": [\n\"Unfortunately, we cannot say that our hypotheses are true by this experiment.\\n\",\n\"\\n\",\n- \"We are going to add `n_rounds` and run experiments by using various random seeds.\"\n+ \"We are going to add `n_rounds` and run experiments by using various random seeds.\\n\",\n+ \"\\n\",\n+ \"Please see [../examples/synthetic_slate](../synthetic_slate) for a more sophisticated example of the evaluation of OPE with synthetic slate bandit data.\"\n]\n},\n{\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix eng in synthetic_slate.ipynb |
641,011 | 17.05.2021 18:56:18 | -32,400 | cb7f3caf6d4e2f60d62321122f84aaf135f1bc62 | add decay_function | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -9,7 +9,7 @@ from obp.dataset.multiclass import MultiClassToBanditReduction\nfrom obp.dataset.synthetic_slate import SyntheticSlateBanditDataset\nfrom obp.dataset.synthetic_slate import action_interaction_additive_reward_function\nfrom obp.dataset.synthetic_slate import linear_behavior_policy_logit\n-from obp.dataset.synthetic_slate import action_interaction_exponential_reward_function\n+from obp.dataset.synthetic_slate import action_interaction_decay_reward_function\n__all__ = [\n\"BaseBanditDataset\",\n@@ -23,5 +23,5 @@ __all__ = [\n\"SyntheticSlateBanditDataset\",\n\"action_interaction_additive_reward_function\",\n\"linear_behavior_policy_logit\",\n- \"action_interaction_exponential_reward_function\",\n+ \"action_interaction_decay_reward_function\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -14,7 +14,7 @@ from tqdm import tqdm\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n-from ..utils import softmax, sigmoid\n+from ..utils import softmax, sigmoid, exponential_decay_function\n@dataclass\n@@ -48,18 +48,26 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nThe mean parameter of the reward distribution is determined by the `reward_function` specified by the next argument.\nreward_structure: str, default='cascade_additive'\n- Type of reward structure, which must be one of 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', or 'standard_exponential'.\n+ Type of reward structure, which must be one of 'cascade_additive', 'cascade_decay', 'independent', 'standard_additive', or 'standard_decay'.\nWhen 'cascade_additive' or 'standard_additive' is given, additive action_interaction_weight_matrix (:math:`W \\\\in \\\\mathbb{R}^{\\\\text{n_unique_action} \\\\times \\\\text{n_unique_action}}`) is generated.\n- When 'cascade_exponential', 'standard_exponential', or 'independent' is given, exponential action_interaction_weight_matrix (:math:`\\\\in \\\\mathbb{R}^{\\\\text{len_list} \\\\times \\\\text{len_list}}`) is generated.\n- Expected reward is calculated as follows (:math:`f` is a base reward function of each item-position, and :math:`g` is a transform function):\n+ When 'cascade_decay', 'standard_decay', or 'independent' is given, decay action_interaction_weight_matrix (:math:`\\\\in \\\\mathbb{R}^{\\\\text{len_list} \\\\times \\\\text{len_list}}`) is generated.\n+ Expected reward is calculated as follows (:math:`f` is a base reward function of each item-position, :math:`g` is a transform function, and :math:`h` is a decay function):\n'cascade_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j < k} W(a(k), a(j)))`.\n- 'cascade_exponential': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j < k} g^{-1}(f(x, a(j))) / \\\\exp(|k-j|))`.\n+ 'cascade_decay': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j < k} g^{-1}(f(x, a(j))) / h(|k-j|))`.\n'independent': :math:`q_k(x, a) = f(x, a(k))`\n'standard_additive': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) + \\\\sum_{j \\\\neq k} W(a(k), a(j)))`.\n- 'standard_exponential': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j \\\\neq k} g^{-1}(f(x, a(j))) / \\\\exp(|k-j|))`.\n+ 'standard_decay': :math:`q_k(x, a) = g(g^{-1}(f(x, a(k))) - \\\\sum_{j \\\\neq k} g^{-1}(f(x, a(j))) / h(|k-j|))`.\nWhen reward_type is 'continuous', transform function is the identity function.\nWhen reward_type is 'binary', transform function is the logit function.\n+ decay_function: Callable[[np.ndarray], np.ndarray], default=exponential_decay_function\n+ Decay function used for 'cascade_decay' and 'standard_decay' reward structures.\n+ For example, we have following decay_function implementations (k and j are positions of two slots).\n+ exponential_decay_function: :math:`h(|k-j|) = \\\\exp(-|k-j|)`.\n+ inverse_decay_function: :math:`h(|k-j|) = \\\\frac{1}{|k-j|+1})`.\n+ inverse_square_root_decay_function: :math:`h(|k-j|) = \\\\frac{1}{(|k-j|+1)^(1/2)})`.\n+ Users can also define their own decay_function.\n+\nclick_model: str, default=None\nType of click model, which must be one of None, 'pbm', or 'cascade'.\nWhen None is given, reward at each slot is sampled based on the original expected rewards.\n@@ -162,6 +170,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndim_context: int = 1\nreward_type: str = \"binary\"\nreward_structure: str = \"cascade_additive\"\n+ 
decay_function: Callable[[np.ndarray], np.ndarray] = exponential_decay_function\nclick_model: Optional[str] = None\neta: float = 1.0\nbase_reward_function: Optional[\n@@ -205,13 +214,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nif self.reward_structure not in [\n\"cascade_additive\",\n- \"cascade_exponential\",\n+ \"cascade_decay\",\n\"independent\",\n\"standard_additive\",\n- \"standard_exponential\",\n+ \"standard_decay\",\n]:\nraise ValueError(\n- f\"reward_structure must be one of 'cascade_additive', 'cascade_exponential', 'independent', 'standard_additive', or 'standard_exponential', but {self.reward_structure} is given.\"\n+ f\"reward_structure must be one of 'cascade_additive', 'cascade_decay', 'independent', 'standard_additive', or 'standard_decay', but {self.reward_structure} is given.\"\n)\nif self.click_model not in [\"cascade\", \"pbm\", None]:\nraise ValueError(\n@@ -243,17 +252,17 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself.reward_function = action_interaction_additive_reward_function\nelse:\nif self.base_reward_function is not None:\n- self.reward_function = action_interaction_exponential_reward_function\n- # generate exponential action interaction weight matrix of (len_list, len_list)\n- if self.reward_structure == \"standard_exponential\":\n+ self.reward_function = action_interaction_decay_reward_function\n+ # generate decay action interaction weight matrix of (len_list, len_list)\n+ if self.reward_structure == \"standard_decay\":\nself.action_interaction_weight_matrix = (\n- self.obtain_standard_exponential_action_interaction_weight_matrix(\n+ self.obtain_standard_decay_action_interaction_weight_matrix(\nself.len_list\n)\n)\n- elif self.reward_structure == \"cascade_exponential\":\n+ elif self.reward_structure == \"cascade_decay\":\nself.action_interaction_weight_matrix = (\n- self.obtain_cascade_exponential_action_interaction_weight_matrix(\n+ self.obtain_cascade_decay_action_interaction_weight_matrix(\nself.len_list\n)\n)\n@@ -270,27 +279,27 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# one-hot encoding representations characterizing each action\nself.action_context = np.eye(self.n_unique_action, dtype=int)\n- @staticmethod\n- def obtain_standard_exponential_action_interaction_weight_matrix(\n+ def obtain_standard_decay_action_interaction_weight_matrix(\n+ self,\nlen_list,\n) -> np.ndarray:\n- \"\"\"Obtain action interaction weight matrix for standard exponential reward structure (symmetric matrix)\"\"\"\n+ \"\"\"Obtain action interaction weight matrix for standard decay reward structure (symmetric matrix)\"\"\"\naction_interaction_weight_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\n- action_interaction_weight_matrix[:, position_] = -1 / np.exp(\n+ action_interaction_weight_matrix[:, position_] = self.decay_function(\nnp.abs(np.arange(len_list) - position_)\n)\naction_interaction_weight_matrix[position_, position_] = 1\nreturn action_interaction_weight_matrix\n- @staticmethod\n- def obtain_cascade_exponential_action_interaction_weight_matrix(\n+ def obtain_cascade_decay_action_interaction_weight_matrix(\n+ self,\nlen_list,\n) -> np.ndarray:\n- \"\"\"Obtain action interaction weight matrix for cascade exponential reward structure (upper triangular matrix)\"\"\"\n+ \"\"\"Obtain action interaction weight matrix for cascade decay reward structure (upper triangular matrix)\"\"\"\naction_interaction_weight_matrix = np.identity(len_list)\nfor position_ in np.arange(len_list):\n- 
action_interaction_weight_matrix[:, position_] = -1 / np.exp(\n+ action_interaction_weight_matrix[:, position_] = self.decay_function(\nnp.abs(np.arange(len_list) - position_)\n)\nfor position_2 in np.arange(len_list):\n@@ -949,7 +958,7 @@ def action_interaction_additive_reward_function(\nreturn expected_reward_factual\n-def action_interaction_exponential_reward_function(\n+def action_interaction_decay_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\naction: np.ndarray,\n@@ -959,7 +968,7 @@ def action_interaction_exponential_reward_function(\nrandom_state: Optional[int] = None,\n**kwargs,\n) -> np.ndarray:\n- \"\"\"Reward function incorporating exponential interactions among combinatorial action\n+ \"\"\"Reward function incorporating decay interactions among combinatorial action\nParameters\n-----------\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -569,13 +569,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"binary\",\n123,\n1000,\n- \"cascade_exponential\",\n+ \"cascade_decay\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n- \"cascade_exponential (binary reward)\",\n+ \"cascade_decay (binary reward)\",\n),\n(\n10,\n@@ -584,13 +584,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"continuous\",\n123,\n1000,\n- \"cascade_exponential\",\n+ \"cascade_decay\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n- \"cascade_exponential (continuous reward)\",\n+ \"cascade_decay (continuous reward)\",\n),\n(\n10,\n@@ -599,13 +599,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"binary\",\n123,\n1000,\n- \"standard_exponential\",\n+ \"standard_decay\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n- \"standard_exponential (binary reward)\",\n+ \"standard_decay (binary reward)\",\n),\n(\n10,\n@@ -614,13 +614,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"continuous\",\n123,\n1000,\n- \"standard_exponential\",\n+ \"standard_decay\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\nlinear_reward_function,\nFalse,\n- \"standard_exponential (continuous reward)\",\n+ \"standard_decay (continuous reward)\",\n),\n(\n10,\n@@ -644,13 +644,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"binary\",\n123,\n1000,\n- \"cascade_exponential\",\n+ \"cascade_decay\",\n\"cascade\",\n0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n- \"cascade_exponential, cascade click model (binary reward)\",\n+ \"cascade_decay, cascade click model (binary reward)\",\n),\n(\n10,\n@@ -674,13 +674,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"binary\",\n123,\n1000,\n- \"standard_exponential\",\n+ \"standard_decay\",\n\"cascade\",\n0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n- \"standard_exponential, cascade click model (binary reward)\",\n+ \"standard_decay, cascade click model (binary reward)\",\n),\n(\n10,\n@@ -719,13 +719,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"binary\",\n123,\n1000,\n- \"cascade_exponential\",\n+ \"cascade_decay\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n- \"cascade_exponential, pbm click model (binary reward)\",\n+ \"cascade_decay, pbm click model (binary reward)\",\n),\n(\n10,\n@@ -749,13 +749,13 @@ valid_input_of_obtain_batch_bandit_feedback = [\n\"binary\",\n123,\n1000,\n- \"standard_exponential\",\n+ \"standard_decay\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\nlogistic_reward_function,\nFalse,\n- \"standard_exponential, pbm click model (binary reward)\",\n+ \"standard_decay, pbm click model (binary reward)\",\n),\n(\n10,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate_functions.py",
"new_path": "tests/dataset/test_synthetic_slate_functions.py",
"diff": "@@ -7,7 +7,7 @@ from obp.dataset.synthetic import (\n)\nfrom obp.dataset.synthetic_slate import (\nlinear_behavior_policy_logit,\n- action_interaction_exponential_reward_function,\n+ action_interaction_decay_reward_function,\naction_interaction_additive_reward_function,\ngenerate_symmetric_matrix,\n)\n@@ -93,7 +93,7 @@ def test_linear_behavior_policy_logit_using_valid_input(\n# context, action_context, action, base_reward_function, action_interaction_weight_matrix, reward_type, random_state, err, description\n-invalid_input_of_action_interaction_exponential_reward_function = [\n+invalid_input_of_action_interaction_decay_reward_function = [\n(\nnp.array([5, 2]),\nnp.ones([4, 2]),\n@@ -154,9 +154,9 @@ invalid_input_of_action_interaction_exponential_reward_function = [\[email protected](\n\"context, action_context, action, base_reward_function, action_interaction_weight_matrix, reward_type, random_state, err, description\",\n- invalid_input_of_action_interaction_exponential_reward_function,\n+ invalid_input_of_action_interaction_decay_reward_function,\n)\n-def test_action_interaction_exponential_reward_function_using_invalid_input(\n+def test_action_interaction_decay_reward_function_using_invalid_input(\ncontext,\naction_context,\naction,\n@@ -168,7 +168,7 @@ def test_action_interaction_exponential_reward_function_using_invalid_input(\ndescription,\n):\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = action_interaction_exponential_reward_function(\n+ _ = action_interaction_decay_reward_function(\ncontext=context,\naction_context=action_context,\naction=action,\n@@ -180,7 +180,7 @@ def test_action_interaction_exponential_reward_function_using_invalid_input(\n# context, action_context, action, base_reward_function, action_interaction_weight_matrix, reward_type, random_state, description\n-valid_input_of_action_interaction_exponential_reward_function = [\n+valid_input_of_action_interaction_decay_reward_function = [\n(\nnp.ones([5, 2]),\nnp.ones([4, 2]),\n@@ -206,9 +206,9 @@ valid_input_of_action_interaction_exponential_reward_function = [\[email protected](\n\"context, action_context, action, base_reward_function, action_interaction_weight_matrix, reward_type, random_state, description\",\n- valid_input_of_action_interaction_exponential_reward_function,\n+ valid_input_of_action_interaction_decay_reward_function,\n)\n-def test_action_interaction_exponential_reward_function_using_valid_input(\n+def test_action_interaction_decay_reward_function_using_valid_input(\ncontext,\naction_context,\naction,\n@@ -218,7 +218,7 @@ def test_action_interaction_exponential_reward_function_using_valid_input(\nrandom_state,\ndescription,\n):\n- expected_reward_factual = action_interaction_exponential_reward_function(\n+ expected_reward_factual = action_interaction_decay_reward_function(\ncontext=context,\naction_context=action_context,\naction=action,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add decay_function |
641,011 | 18.05.2021 18:17:32 | -32,400 | 402140796514998e72291b443a83e42b9091c5bb | implement calc_ground_truth_policy_value function | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -316,10 +316,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_interaction_weight_matrix[position_, position_] = 1\nreturn action_interaction_weight_matrix\n- def calc_item_position_pscore(\n+ def calc_pscore_given_action_list(\nself, action_list: List[int], behavior_policy_logit_i_: np.ndarray\n) -> float:\n- \"\"\"Calculate the marginal propensity score, i.e., the probability that an action (specified by action_list) is presented at a position.\"\"\"\n+ \"\"\"Calculate the propensity score given combinatorial set of actions.\"\"\"\nunique_action_set = np.arange(self.n_unique_action)\npscore_ = 1.0\nfor action in action_list:\n@@ -414,12 +414,14 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n):\nif sampled_action != action_list[position_]:\ncontinue\n- pscore_item_position_i_l += self.calc_item_position_pscore(\n+ pscore_item_position_i_l += (\n+ self.calc_pscore_given_action_list(\naction_list=action_list,\nbehavior_policy_logit_i_=behavior_policy_logit_[\ni : i + 1\n],\n)\n+ )\npscore_item_position[\ni * self.len_list + position_\n] = pscore_item_position_i_l\n@@ -430,9 +432,20 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn action, pscore_cascade, pscore, pscore_item_position\n- def sample_contextfree_expected_reward(self) -> np.ndarray:\n- \"\"\"Sample expected reward for each action and slot from the uniform distribution\"\"\"\n- return self.random_.uniform(size=(self.n_unique_action, self.len_list))\n+ def sample_contextfree_expected_reward(\n+ self, random_state: Optional[int] = None\n+ ) -> np.ndarray:\n+ \"\"\"Sample expected reward for each action and slot from the uniform distribution\n+\n+ Parameters\n+ -----------\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ \"\"\"\n+ random_ = check_random_state(random_state)\n+ return random_.uniform(size=(self.n_unique_action, self.len_list))\ndef sample_reward_given_expected_reward(\nself, expected_reward_factual: np.ndarray\n@@ -548,7 +561,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\n# sample expected reward factual\nif self.base_reward_function is None:\n- expected_reward = self.sample_contextfree_expected_reward()\n+ expected_reward = self.sample_contextfree_expected_reward(\n+ random_state=self.random_state\n+ )\nexpected_reward_tile = np.tile(expected_reward, (n_rounds, 1, 1))\n# action_2d: array-like, shape (n_rounds, len_list)\naction_2d = action.reshape((n_rounds, self.len_list))\n@@ -634,6 +649,100 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn reward.sum() / np.unique(slate_id).shape[0]\n+ def calc_ground_truth_policy_value(\n+ self, evaluation_policy_logit: np.ndarray, context: np.adarray\n+ ):\n+ \"\"\"Calculate the ground-truth policy value of given evaluation policy logit and context\n+\n+ Parameters\n+ -----------\n+ evaluation_policy_logit: array-like, shape (n_rounds, n_actions)\n+ Evaluation policy function generating logit value of each action in action space.\n+\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ \"\"\"\n+ n_rounds = len(evaluation_policy_logit)\n+ policy_value = 0\n+\n+ for i in n_rounds:\n+ enumerated_slate_actions = np.array(\n+ [\n+ _\n+ for _ in permutations(\n+ np.arange(self.n_unique_action), self.len_list\n+ )\n+ ]\n+ )\n+ n_slate_actions = len(enumerated_slate_actions)\n+\n+ # calculate pscore for each combinatorial set of items (i.e., slate actions)\n+ pscores = []\n+ for 
action_list in enumerated_slate_actions:\n+ pscores.append(\n+ self._calc_pscore_given_action_list(\n+ action_list=action_list,\n+ policy_logit_i_=evaluation_policy_logit[i : i + 1],\n+ )\n+ )\n+ pscores = np.array(pscores)\n+\n+ # calculate expected slate-level reward for each combinatorial set of items (i.e., slate actions)\n+ if self.base_reward_function is None:\n+ expected_slot_reward = self.sample_contextfree_expected_reward(\n+ random_state=self.random_state\n+ )\n+ expected_slot_reward_tile = np.tile(\n+ expected_slot_reward, (n_slate_actions, 1, 1)\n+ )\n+ expected_slate_rewards = np.array(\n+ [\n+ expected_slot_reward_tile[\n+ np.arange(n_slate_actions),\n+ enumerated_slate_actions[:, position_],\n+ position_,\n+ ]\n+ for position_ in np.arange(self.len_list)\n+ ]\n+ ).T\n+ else:\n+ expected_slate_rewards = self.reward_function(\n+ context=np.tile(context[i], (n_slate_actions, 1)),\n+ action_context=self.action_context,\n+ action=n_slate_actions,\n+ action_interaction_weight_matrix=self.action_interaction_weight_matrix,\n+ base_reward_function=self.base_reward_function,\n+ is_cascade=\"cascade\" in self.reward_structure,\n+ reward_type=self.reward_type,\n+ len_list=self.len_list,\n+ random_state=self.random_state,\n+ )\n+ expected_slate_rewards = np.clip(\n+ expected_slate_rewards, 0, None\n+ ) # (n_slate_actions, self.len_list)\n+\n+ # click models based on expected reward\n+ expected_slate_rewards *= self.exam_weight\n+ if self.reward_type == \"binary\":\n+ discount_factors = np.ones(expected_slate_rewards.shape[0])\n+ previous_slot_expected_reward = np.zeros(\n+ expected_slate_rewards.shape[0]\n+ )\n+ for position_ in np.arange(self.len_list):\n+ discount_factors *= (\n+ previous_slot_expected_reward * self.attractiveness[position_]\n+ + (1 - previous_slot_expected_reward)\n+ )\n+ expected_slate_rewards[:, position_] = (\n+ discount_factors * expected_slate_rewards[:, position_]\n+ )\n+ previous_slot_expected_reward = expected_slate_rewards[:, position_]\n+\n+ policy_value += pscores * expected_slate_rewards.sum(axis=1)\n+\n+ return policy_value / n_rounds\n+\ndef generate_evaluation_policy_pscore(\nself,\nevaluation_policy_type: str,\n@@ -1112,3 +1221,17 @@ def linear_behavior_policy_logit(\nlogits[:, d] = context @ coef_ + action_context[d] @ action_coef_\nreturn logits / tau\n+\n+\n+def exponential_decay_function(distance: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate exponential discount factor.\n+ TODO\n+ \"\"\"\n+ return np.exp(-distance)\n+\n+\n+def inverse_decay_function(distance: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate inverse discount factor.\n+ TODO\n+ \"\"\"\n+ return 1 / (distance + 1)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement calc_ground_truth_policy_value function |
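A minimal sketch of the idea in the commit above: the value of a slate policy can be obtained by enumerating every ordered slate, weighting each slate's expected reward by its propensity under the evaluation policy, and summing. This is not the library's implementation; the softmax helper, toy_logits and expected_reward below are invented toy inputs.

from itertools import permutations

import numpy as np


def softmax(x: np.ndarray) -> np.ndarray:
    # numerically stable softmax over the last axis
    z = x - x.max(axis=-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)


def slate_pscore(slate: tuple, logit: np.ndarray) -> float:
    # probability of drawing `slate` one item at a time without replacement (Plackett-Luce)
    remaining = list(range(len(logit)))
    p = 1.0
    for a in slate:
        probs = softmax(logit[remaining])
        p *= probs[remaining.index(a)]
        remaining.remove(a)
    return p


n_unique_action, len_list = 4, 2
rng = np.random.default_rng(0)
toy_logits = rng.normal(size=n_unique_action)                    # evaluation policy logits for one context
expected_reward = rng.uniform(size=(n_unique_action, len_list))  # toy q(item, position) table

value = 0.0
for slate in permutations(range(n_unique_action), len_list):
    slate_reward = sum(expected_reward[a, pos] for pos, a in enumerate(slate))
    value += slate_pscore(slate, toy_logits) * slate_reward
print(value)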
641,011 | 20.05.2021 17:07:21 | -32,400 | d3638e83f79ec53669d47e9cb9db0606f905e559 | implement obtain_pscore_given_evaluation_policy_logit function | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -341,6 +341,65 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nreturn pscore_\n+ def obtain_pscore_given_evaluation_policy_logit(\n+ self,\n+ action: np.ndarray,\n+ evaluation_policy_logit_: np.ndarray,\n+ return_pscore_item_position: bool = True,\n+ ):\n+ n_rounds = action.reshape((-1, self.len_list)).shape[0]\n+ pscore_cascade = np.zeros_like(action)\n+ pscore = np.zeros_like(action)\n+ if return_pscore_item_position:\n+ pscore_item_position = np.zeros_like(action)\n+ else:\n+ pscore_item_position = None\n+ for i in tqdm(\n+ np.arange(n_rounds),\n+ desc=\"[obtain_pscore_by_evaluation_policy]\",\n+ total=n_rounds,\n+ ):\n+ unique_action_set = np.arange(self.n_unique_action)\n+ pscore_i = 1.0\n+ for position_ in np.arange(self.len_list):\n+ action_ = action[i * self.len_list + position_]\n+ action_index_ = np.where(unique_action_set == action_)[0][0]\n+ score_ = softmax(\n+ evaluation_policy_logit_[i : i + 1, unique_action_set]\n+ )[0][action_index_]\n+ # calculate joint pscore\n+ pscore_i *= score_\n+ pscore_cascade[i * self.len_list + position_] = pscore_i\n+ unique_action_set = np.delete(\n+ unique_action_set, unique_action_set == action_\n+ )\n+ # calculate marginal pscore\n+ if return_pscore_item_position:\n+ if position_ == 0:\n+ pscore_item_position_i_l = pscore_i\n+ else:\n+ pscore_item_position_i_l = 0.0\n+ for action_list in permutations(\n+ np.arange(self.n_unique_action), self.len_list\n+ ):\n+ if action_ != action_list[position_]:\n+ continue\n+ pscore_item_position_i_l += (\n+ self._calc_pscore_given_action_list(\n+ action_list=action_list,\n+ policy_logit_i_=evaluation_policy_logit_[i : i + 1],\n+ )\n+ )\n+ pscore_item_position[\n+ i * self.len_list + position_\n+ ] = pscore_item_position_i_l\n+ # impute joint pscore\n+ start_idx = i * self.len_list\n+ end_idx = start_idx + self.len_list\n+ pscore[start_idx:end_idx] = pscore_i\n+\n+ return pscore, pscore_item_position, pscore_cascade\n+\ndef sample_action_and_obtain_pscore(\nself,\nbehavior_policy_logit_: np.ndarray,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta_slate.py",
"new_path": "obp/ope/meta_slate.py",
"diff": "@@ -47,7 +47,7 @@ class SlateOffPolicyEvaluation:\n)\n# (1) Synthetic Data Generation\n- >>> dataset = dataset = SyntheticSlateBanditDataset(\n+ >>> dataset = SyntheticSlateBanditDataset(\nn_unique_action=10,\nlen_list=3,\ndim_context=2,\n@@ -444,7 +444,7 @@ class SlateOffPolicyEvaluation:\nParameters\n----------\n- ground_truth policy value: float\n+ ground_truth_policy_value: float\nGround_truth policy value of an evaluation policy, i.e., :math:`V(\\\\pi)`.\nWith Open Bandit Dataset, in general, we use an on-policy estimate of the policy value as its ground-truth.\n@@ -510,7 +510,7 @@ class SlateOffPolicyEvaluation:\nParameters\n----------\n- ground_truth policy value: float\n+ ground_truth_policy_value: float\nGround_truth policy value of an evaluation policy, i.e., :math:`V(\\\\pi)`.\nWith Open Bandit Dataset, in general, we use an on-policy estimate of the policy value as ground-truth.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement obtain_pscore_given_evaluation_policy_logit function |
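A sketch of how the propensity of an already-logged slate can be evaluated under a policy's logits, assuming items are drawn one slot at a time from a softmax over the remaining items; toy_logit and logged_slate are invented inputs, and the cascade pscore is the running prefix product.

import numpy as np


def softmax(x: np.ndarray) -> np.ndarray:
    z = x - x.max()
    e = np.exp(z)
    return e / e.sum()


def cascade_and_joint_pscore(slate: np.ndarray, logit: np.ndarray):
    # per-position cumulative (cascade) pscores and the joint slate pscore
    remaining = np.arange(len(logit))
    cascade = np.empty(len(slate))
    p = 1.0
    for pos, a in enumerate(slate):
        probs = softmax(logit[remaining])
        p *= probs[np.where(remaining == a)[0][0]]
        cascade[pos] = p
        remaining = remaining[remaining != a]
    return cascade, p  # the joint pscore equals the last cascade entry


toy_logit = np.array([0.0, 1.0, 2.0, 0.5])   # assumed evaluation policy logits for one round
logged_slate = np.array([2, 0, 3])           # assumed logged ranking of length 3
cascade, joint = cascade_and_joint_pscore(logged_slate, toy_logit)
print(cascade, joint)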
641,011 | 23.05.2021 11:40:59 | -32,400 | 22a89152e61526db495e6504577721f19e4ab28a | add tests and minor fix | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -354,7 +354,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction: array-like, (n_rounds * len_list, )\nAction chosen by behavior policy.\n- evaluation_policy_logit_: array-like, (n_rounds, n_unique_action, )\n+ evaluation_policy_logit_: array-like, (n_rounds, n_unique_action)\nEvaluation policy logit values by given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\nreturn_pscore_item_position: bool, default=True\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -13,7 +13,7 @@ from obp.dataset import (\nfrom obp.types import BanditFeedback\n-# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, eta, random_state, err, description\n+# n_unique_action, len_list, dim_context, reward_type, reward_structure, decay_function, click_model, eta, random_state, err, description\ninvalid_input_of_init = [\n(\n\"4\",\n@@ -21,6 +21,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -33,6 +34,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -45,6 +47,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -57,6 +60,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -69,6 +73,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -81,6 +86,7 @@ invalid_input_of_init = [\n0,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -93,6 +99,7 @@ invalid_input_of_init = [\n\"2\",\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -105,6 +112,7 @@ invalid_input_of_init = [\n2,\n\"aaa\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -117,6 +125,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"aaa\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n1,\n@@ -130,6 +139,20 @@ invalid_input_of_init = [\n\"binary\",\n\"independent\",\n\"aaa\",\n+ \"pbm\",\n+ 1.0,\n+ 1,\n+ ValueError,\n+ \"decay_function must be either\",\n+ ),\n+ (\n+ 5,\n+ 3,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ \"exponential\",\n+ \"aaa\",\n1.0,\n1,\nValueError,\n@@ -141,6 +164,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n\"aaa\",\n1,\n@@ -153,6 +177,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n-1.0,\n1,\n@@ -165,6 +190,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\n\"x\",\n@@ -177,6 +203,7 @@ invalid_input_of_init = [\n2,\n\"binary\",\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n1.0,\nNone,\n@@ -187,7 +214,7 @@ invalid_input_of_init = [\[email protected](\n- \"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, eta, random_state, err, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, reward_structure, decay_function, click_model, eta, random_state, err, description\",\ninvalid_input_of_init,\n)\ndef test_synthetic_slate_init_using_invalid_inputs(\n@@ -196,6 +223,7 @@ def test_synthetic_slate_init_using_invalid_inputs(\ndim_context,\nreward_type,\nreward_structure,\n+ decay_function,\nclick_model,\neta,\nrandom_state,\n@@ -209,6 +237,7 @@ def test_synthetic_slate_init_using_invalid_inputs(\ndim_context=dim_context,\nreward_type=reward_type,\nreward_structure=reward_structure,\n+ decay_function=decay_function,\nclick_model=click_model,\neta=eta,\nrandom_state=random_state,\n@@ -455,7 +484,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\nassert set(np.unique(bandit_feedback[\"reward\"])) == set([0, 1])\n-# n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, eta, behavior_policy_function, reward_function, return_pscore_item_position, description\n+# n_unique_action, len_list, dim_context, reward_type, decay_function, 
random_state, n_rounds, reward_structure, click_model, eta, behavior_policy_function, reward_function, return_pscore_item_position, description\nvalid_input_of_obtain_batch_bandit_feedback = [\n(\n10,\n@@ -465,6 +494,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_additive\",\n+ \"exponential\",\nNone,\n1.0,\nlinear_behavior_policy_logit,\n@@ -480,6 +510,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"independent\",\n+ \"exponential\",\nNone,\n1.0,\nlinear_behavior_policy_logit,\n@@ -495,6 +526,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n+ \"exponential\",\nNone,\n1.0,\nlinear_behavior_policy_logit,\n@@ -510,6 +542,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_additive\",\n+ \"exponential\",\nNone,\n1.0,\nlinear_behavior_policy_logit,\n@@ -525,6 +558,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"independent\",\n+ \"exponential\",\nNone,\n1.0,\nlinear_behavior_policy_logit,\n@@ -540,6 +574,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n+ \"exponential\",\nNone,\n1.0,\nlinear_behavior_policy_logit,\n@@ -555,6 +590,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n+ \"exponential\",\nNone,\n0.0,\nNone,\n@@ -570,6 +606,23 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_decay\",\n+ \"exponential\",\n+ None,\n+ 0.0,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"cascade_decay (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"cascade_decay\",\n+ \"inverse\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\n@@ -585,6 +638,23 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_decay\",\n+ \"exponential\",\n+ None,\n+ 0.0,\n+ linear_behavior_policy_logit,\n+ linear_reward_function,\n+ False,\n+ \"cascade_decay (continuous reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"cascade_decay\",\n+ \"inverse\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\n@@ -600,6 +670,23 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_decay\",\n+ \"exponential\",\n+ None,\n+ 0.0,\n+ linear_behavior_policy_logit,\n+ logistic_reward_function,\n+ False,\n+ \"standard_decay (binary reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"binary\",\n+ 123,\n+ 1000,\n+ \"standard_decay\",\n+ \"inverse\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\n@@ -615,6 +702,23 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_decay\",\n+ \"exponential\",\n+ None,\n+ 0.0,\n+ linear_behavior_policy_logit,\n+ linear_reward_function,\n+ False,\n+ \"standard_decay (continuous reward)\",\n+ ),\n+ (\n+ 10,\n+ 3,\n+ 2,\n+ \"continuous\",\n+ 123,\n+ 1000,\n+ \"standard_decay\",\n+ \"inverse\",\nNone,\n0.0,\nlinear_behavior_policy_logit,\n@@ -630,6 +734,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n+ \"exponential\",\n\"cascade\",\n0.0,\nlinear_behavior_policy_logit,\n@@ -645,6 +750,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_decay\",\n+ \"exponential\",\n\"cascade\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -660,6 +766,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_additive\",\n+ \"exponential\",\n\"cascade\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -675,6 +782,7 @@ valid_input_of_obtain_batch_bandit_feedback = 
[\n123,\n1000,\n\"standard_decay\",\n+ \"exponential\",\n\"cascade\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -690,6 +798,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"independent\",\n+ \"exponential\",\n\"cascade\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -705,6 +814,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_additive\",\n+ \"exponential\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -720,6 +830,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"cascade_decay\",\n+ \"exponential\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -735,6 +846,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_additive\",\n+ \"exponential\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -750,6 +862,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"standard_decay\",\n+ \"exponential\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -765,6 +878,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\n123,\n1000,\n\"independent\",\n+ \"exponential\",\n\"pbm\",\n0.5,\nlinear_behavior_policy_logit,\n@@ -776,7 +890,7 @@ valid_input_of_obtain_batch_bandit_feedback = [\[email protected](\n- \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, click_model, eta, behavior_policy_function, reward_function, return_pscore_item_position, description\",\n+ \"n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, decay_function, click_model, eta, behavior_policy_function, reward_function, return_pscore_item_position, description\",\nvalid_input_of_obtain_batch_bandit_feedback,\n)\ndef test_synthetic_slate_using_valid_inputs(\n@@ -787,6 +901,7 @@ def test_synthetic_slate_using_valid_inputs(\nrandom_state,\nn_rounds,\nreward_structure,\n+ decay_function,\nclick_model,\neta,\nbehavior_policy_function,\n@@ -800,6 +915,7 @@ def test_synthetic_slate_using_valid_inputs(\ndim_context=dim_context,\nreward_type=reward_type,\nreward_structure=reward_structure,\n+ decay_function=decay_function,\nclick_model=click_model,\neta=eta,\nrandom_state=random_state,\n@@ -835,7 +951,7 @@ def test_synthetic_slate_using_valid_inputs(\nn_rounds = 5\nlen_list = 3\n# slate_id, reward, description\n-invalid_input_of_calc_true_policy_value = [\n+invalid_input_of_calc_on_policy_policy_value = [\n(\nnp.repeat(np.arange(n_rounds), len_list),\n\"4\", #\n@@ -866,7 +982,7 @@ invalid_input_of_calc_true_policy_value = [\[email protected](\n\"slate_id, reward, description\",\n- invalid_input_of_calc_true_policy_value,\n+ invalid_input_of_calc_on_policy_policy_value,\n)\ndef test_calc_on_policy_policy_value_using_invalid_input_data(\nslate_id, reward, description\n@@ -889,7 +1005,7 @@ def test_calc_on_policy_policy_value_using_invalid_input_data(\n# slate_id, reward, description\n-valid_input_of_calc_true_policy_value = [\n+valid_input_of_calc_on_policy_policy_value = [\n(\nnp.array([1, 1, 2, 2, 3, 4]),\nnp.array([0, 1, 1, 0, 0, 0]),\n@@ -907,7 +1023,7 @@ valid_input_of_calc_true_policy_value = [\[email protected](\n\"slate_id, reward, result, description\",\n- valid_input_of_calc_true_policy_value,\n+ valid_input_of_calc_on_policy_policy_value,\n)\ndef test_calc_on_policy_policy_value_using_valid_input_data(\nslate_id, reward, result, description\n@@ -1239,6 +1355,92 @@ def test_calc_epsilon_greedy_pscore_using_valid_input_data(\nassert np.allclose(true_pscore_cascade, pscore_cascade)\n+# n_rounds, n_unique_action, len_list, dim_context, 
reward_type, reward_structure, click_model, evaluation_policy_logit, context, description\n+invalid_input_of_calc_ground_truth_policy_value = [\n+ (\n+ 3,\n+ 3,\n+ 2,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ None,\n+ np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n+ np.ones((3, 2)),\n+ None,\n+ ),\n+ (\n+ 4,\n+ 3,\n+ 2,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ None,\n+ np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n+ np.ones((3, 2)),\n+ None,\n+ ),\n+ (\n+ 3,\n+ 2,\n+ 2,\n+ 2,\n+ \"binary\",\n+ \"independent\",\n+ None,\n+ np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n+ np.ones((3, 2)),\n+ None,\n+ ),\n+ (\n+ 3,\n+ 3,\n+ 2,\n+ 1,\n+ \"binary\",\n+ \"independent\",\n+ None,\n+ np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n+ np.ones((3, 2)),\n+ None,\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_rounds, n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, evaluation_policy_logit, context, description\",\n+ invalid_input_of_calc_ground_truth_policy_value,\n+)\n+def test_calc_ground_truth_policy_value_using_invalid_input_data(\n+ n_rounds,\n+ n_unique_action,\n+ len_list,\n+ dim_context,\n+ reward_type,\n+ reward_structure,\n+ click_model,\n+ evaluation_policy_logit,\n+ context,\n+ description,\n+):\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ base_reward_function=logistic_reward_function,\n+ )\n+ _ = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ with pytest.raises(ValueError):\n+ dataset.calc_ground_truth_policy_value(\n+ evaluation_policy_logit=evaluation_policy_logit,\n+ context=context,\n+ )\n+\n+\n# n_rounds, n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, base_reward_function, evaluation_policy_logit, description\nvalid_input_of_calc_ground_truth_policy_value = [\n(\n@@ -1530,87 +1732,76 @@ def test_calc_ground_truth_policy_value_value_check_with_eta(click_model):\nassert policy_value_2 < policy_value_1 < policy_value_05\n-# n_rounds, n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, evaluation_policy_logit, context, description\n-invalid_input_of_calc_ground_truth_policy_value = [\n- (\n- 3,\n- 3,\n- 2,\n- 2,\n- \"binary\",\n- \"independent\",\n- None,\n- np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n- np.ones((3, 2)),\n- None,\n- ),\n- (\n- 4,\n- 3,\n- 2,\n- 2,\n- \"binary\",\n- \"independent\",\n- None,\n- np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n- np.ones((3, 2)),\n- None,\n- ),\n- (\n- 3,\n- 2,\n- 2,\n- 2,\n- \"binary\",\n- \"independent\",\n- None,\n- np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n- np.ones((3, 2)),\n- None,\n- ),\n- (\n- 3,\n- 3,\n- 2,\n- 1,\n- \"binary\",\n- \"independent\",\n- None,\n- np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3]]),\n- np.ones((3, 2)),\n- None,\n- ),\n+n_rounds = 10\n+n_unique_action = 5\n+len_list = 3\n+# action, evaluation_policy_logit_\n+invalid_input_of_obtain_pscore_given_evaluation_policy_logit = [\n+ (np.ones((n_rounds, len_list)), np.ones((n_rounds, n_unique_action))),\n+ (np.ones((n_rounds * len_list)), np.ones((n_rounds * n_unique_action))),\n+ (np.ones((n_rounds * len_list + 1)), np.ones((n_rounds, n_unique_action))),\n+ (np.ones((n_rounds * len_list)), np.ones((n_rounds, n_unique_action + 1))),\n+ (np.ones((n_rounds * len_list)), np.ones((n_rounds + 1, 
n_unique_action))),\n]\[email protected](\n- \"n_rounds, n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, evaluation_policy_logit, context, description\",\n- invalid_input_of_calc_ground_truth_policy_value,\n+ \"action, evaluation_policy_logit_\",\n+ invalid_input_of_obtain_pscore_given_evaluation_policy_logit,\n)\n-def test_calc_ground_truth_policy_value_using_invalid_input_data(\n- n_rounds,\n- n_unique_action,\n- len_list,\n- dim_context,\n- reward_type,\n- reward_structure,\n- click_model,\n- evaluation_policy_logit,\n- context,\n- description,\n-):\n+def test_obtain_pscore_given_evaluation_policy_logit(action, evaluation_policy_logit_):\ndataset = SyntheticSlateBanditDataset(\nn_unique_action=n_unique_action,\nlen_list=len_list,\n- dim_context=dim_context,\n- reward_type=reward_type,\n- reward_structure=reward_structure,\n- click_model=click_model,\n- base_reward_function=logistic_reward_function,\n)\n- _ = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\nwith pytest.raises(ValueError):\n- dataset.calc_ground_truth_policy_value(\n- evaluation_policy_logit=evaluation_policy_logit,\n- context=context,\n+ dataset.obtain_pscore_given_evaluation_policy_logit(\n+ action=action,\n+ evaluation_policy_logit_=evaluation_policy_logit_,\n+ )\n+\n+\[email protected](\"return_pscore_item_position\", [(True), (False)])\n+def test_obtain_pscore_given_evaluation_policy_logit_value_check(\n+ return_pscore_item_position,\n+):\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=10,\n+ len_list=5,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=2,\n+ return_pscore_item_position=return_pscore_item_position,\n+ )\n+ behavior_and_evaluation_policy_logit_ = dataset.behavior_policy_function(\n+ context=bandit_feedback[\"context\"],\n+ action_context=bandit_feedback[\"action_context\"],\n+ random_state=dataset.random_state,\n+ )\n+ (\n+ evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position,\n+ evaluation_policy_pscore_cascade,\n+ ) = dataset.obtain_pscore_given_evaluation_policy_logit(\n+ action=bandit_feedback[\"action\"],\n+ evaluation_policy_logit_=behavior_and_evaluation_policy_logit_,\n+ return_pscore_item_position=return_pscore_item_position,\n+ )\n+ print(bandit_feedback[\"pscore\"])\n+ print(evaluation_policy_pscore)\n+\n+ assert np.allclose(bandit_feedback[\"pscore\"], evaluation_policy_pscore)\n+ assert np.allclose(\n+ bandit_feedback[\"pscore_cascade\"], evaluation_policy_pscore_cascade\n+ )\n+ assert (\n+ np.allclose(\n+ bandit_feedback[\"pscore_item_position\"],\n+ evaluation_policy_pscore_item_position,\n+ )\n+ if return_pscore_item_position\n+ else bandit_feedback[\"pscore_item_position\"]\n+ == evaluation_policy_pscore_item_position\n)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests and minor fix |
641,011 | 26.05.2021 16:04:19 | -32,400 | 14982edf3575c9f6d4835b6be5a635e97d659160 | add is_factorizable option | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -346,6 +346,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction: np.ndarray,\nevaluation_policy_logit_: np.ndarray,\nreturn_pscore_item_position: bool = True,\n+ is_factorizable: bool = False,\n):\n\"\"\"Calculate the propensity score given evaluation policy logit.\n@@ -361,6 +362,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen n_actions and len_list are large, giving True to this parameter may lead to a large computational time.\n+ is_factorizable: bool\n+ A boolean parameter whether to use factorizable evaluation policy (which choose slot actions independently) or not.\n+ When `n_unique_action` and `len_list` are large, this parameter should be set to True because of the computational time.\n+\n\"\"\"\nif not isinstance(action, np.ndarray) or action.ndim != 1:\nraise ValueError(\"action must be 1-dimensional ndarray\")\n@@ -389,23 +394,28 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ntotal=n_rounds,\n):\nunique_action_set = np.arange(self.n_unique_action)\n+ score_ = softmax(evaluation_policy_logit_[i : i + 1])[0]\npscore_i = 1.0\nfor position_ in np.arange(self.len_list):\naction_ = action[i * self.len_list + position_]\naction_index_ = np.where(unique_action_set == action_)[0][0]\n- score_ = softmax(\n- evaluation_policy_logit_[i : i + 1, unique_action_set]\n- )[0][action_index_]\n# calculate joint pscore\n- pscore_i *= score_\n+ pscore_i *= score_[action_index_]\npscore_cascade[i * self.len_list + position_] = pscore_i\n+ # update remaining item for non-factorizable policy\n+ if not is_factorizable:\nunique_action_set = np.delete(\nunique_action_set, unique_action_set == action_\n)\n+ score_ = softmax(\n+ evaluation_policy_logit_[i : i + 1][unique_action_set]\n+ )[0]\n# calculate marginal pscore\nif return_pscore_item_position:\nif position_ == 0:\npscore_item_position_i_l = pscore_i\n+ elif is_factorizable:\n+ pscore_item_position_i_l = score_[action_index_]\nelse:\npscore_item_position_i_l = 0.0\nfor action_list in permutations(\n@@ -434,6 +444,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_: np.ndarray,\nn_rounds: int,\nreturn_pscore_item_position: bool = True,\n+ is_factorizable: bool = False,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n\"\"\"Sample action and obtain the three variants of the propensity scores.\n@@ -449,6 +460,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen n_actions and len_list are large, giving True to this parameter may lead to a large computational time.\n+ is_factorizable: bool\n+ A boolean parameter whether to use factorizable behavior policy (which choose slot actions independently) or not.\n+ When `n_unique_action` and `len_list` are large, this parameter should be set to True because of the computational time.\n+\nReturns\n----------\naction: array-like, shape (n_rounds * len_list)\n@@ -481,11 +496,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ntotal=n_rounds,\n):\nunique_action_set = np.arange(self.n_unique_action)\n+ score_ = softmax(behavior_policy_logit_[i : i + 1, unique_action_set])[0]\npscore_i = 1.0\nfor position_ in np.arange(self.len_list):\n- score_ = softmax(behavior_policy_logit_[i : i + 1, unique_action_set])[\n- 0\n- ]\nsampled_action = self.random_.choice(\nunique_action_set, p=score_, replace=False\n)\n@@ -496,13 +509,20 @@ class 
SyntheticSlateBanditDataset(BaseBanditDataset):\n# calculate joint pscore\npscore_i *= score_[sampled_action_index]\npscore_cascade[i * self.len_list + position_] = pscore_i\n+ # update remaining items for non-factorizable behavior policy\n+ if not is_factorizable:\nunique_action_set = np.delete(\nunique_action_set, unique_action_set == sampled_action\n)\n+ score_ = softmax(\n+ behavior_policy_logit_[i : i + 1, unique_action_set]\n+ )[0]\n# calculate marginal pscore\nif return_pscore_item_position:\nif self.behavior_policy_function is None: # uniform random\npscore_item_position_i_l = 1 / self.n_unique_action\n+ elif is_factorizable:\n+ pscore_item_position_i_l = score_[sampled_action_index]\nelif position_ == 0:\npscore_item_position_i_l = pscore_i\nelse:\n@@ -598,6 +618,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself,\nn_rounds: int,\nreturn_pscore_item_position: bool = True,\n+ is_factorizable: bool = False,\n) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -610,6 +631,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen `n_unique_action` and `len_list` are large, this parameter should be set to False because of the computational time.\n+ is_factorizable: bool\n+ A boolean parameter whether to use factorizable behavior policy (which choose slot actions independently) or not.\n+ When `n_unique_action` and `len_list` are large, this parameter should be set to True because of the computational time.\n+\nReturns\n---------\nbandit_feedback: BanditFeedback\n@@ -649,6 +674,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_=behavior_policy_logit_,\nn_rounds=n_rounds,\nreturn_pscore_item_position=return_pscore_item_position,\n+ is_factorizable=is_factorizable,\n)\n# sample expected reward factual\nif self.base_reward_function is None:\n@@ -741,7 +767,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn reward.sum() / np.unique(slate_id).shape[0]\ndef calc_ground_truth_policy_value(\n- self, evaluation_policy_logit: np.ndarray, context: np.ndarray\n+ self,\n+ evaluation_policy_logit: np.ndarray,\n+ context: np.ndarray,\n+ is_factorizable: np.ndarray,\n):\n\"\"\"Calculate the ground-truth policy value of given evaluation policy logit and context\n@@ -753,6 +782,10 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ncontext: array-like, shape (n_rounds, dim_context)\nContext vectors characterizing each round (such as user information).\n+ is_factorizable: bool\n+ A boolean parameter whether to use factorizable evaluation policy (which choose slot actions independently) or not.\n+ When `n_unique_action` and `len_list` are large, this parameter should be set to True because of the computational time.\n+\n\"\"\"\nif (\nnot isinstance(evaluation_policy_logit, np.ndarray)\n@@ -788,7 +821,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n):\n# calculate pscore for each combinatorial set of items (i.e., slate actions)\npscores = []\n+ factorizable_pscore = softmax(evaluation_policy_logit[i : i + 1])[0]\nfor action_list in enumerated_slate_actions:\n+ if is_factorizable:\n+ pscores.append(\n+ np.cumprod([factorizable_pscore[a_] for a_ in action_list])[-1]\n+ )\n+ else:\npscores.append(\nself._calc_pscore_given_action_list(\naction_list=action_list,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add is_factorizable option |
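A sketch contrasting the two sampling schemes under the assumption that a factorizable policy draws every slot independently from the same softmax (so items may repeat), while a non-factorizable policy samples without replacement; toy_logit is an invented input.

import numpy as np


def softmax(x: np.ndarray) -> np.ndarray:
    z = x - x.max()
    e = np.exp(z)
    return e / e.sum()


def sample_slate(logit: np.ndarray, len_list: int, is_factorizable: bool, rng) -> np.ndarray:
    slate = []
    remaining = np.arange(len(logit))
    for _ in range(len_list):
        probs = softmax(logit[remaining])
        a = rng.choice(remaining, p=probs)
        slate.append(a)
        if not is_factorizable:
            # without replacement: drop the chosen item before filling the next slot
            remaining = remaining[remaining != a]
    return np.array(slate)


rng = np.random.default_rng(0)
toy_logit = np.array([0.2, 1.5, -0.3, 0.7, 0.0])  # assumed policy logits
print(sample_slate(toy_logit, 3, is_factorizable=True, rng=rng))   # items may repeat
print(sample_slate(toy_logit, 3, is_factorizable=False, rng=rng))  # items are distinct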
641,011 | 26.05.2021 19:36:48 | -32,400 | 059858ef0183024e2d03224e2e06e7c82569ddd8 | fix calc_ground_truth_policy_value | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "\"\"\"Class for Generating Synthetic Slate Logged Bandit Feedback.\"\"\"\nfrom dataclasses import dataclass\nfrom typing import Optional, Callable, Tuple, Union, List\n-from itertools import permutations\n+from itertools import permutations, product\nimport numpy as np\nfrom scipy.stats import truncnorm\n@@ -408,7 +408,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nunique_action_set, unique_action_set == action_\n)\nscore_ = softmax(\n- evaluation_policy_logit_[i : i + 1][unique_action_set]\n+ evaluation_policy_logit_[i : i + 1, unique_action_set]\n)[0]\n# calculate marginal pscore\nif return_pscore_item_position:\n@@ -768,31 +768,31 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndef calc_ground_truth_policy_value(\nself,\n- evaluation_policy_logit: np.ndarray,\ncontext: np.ndarray,\n+ evaluation_policy_logit_: np.ndarray,\nis_factorizable: np.ndarray,\n):\n\"\"\"Calculate the ground-truth policy value of given evaluation policy logit and context\nParameters\n-----------\n- evaluation_policy_logit: array-like, shape (n_rounds, n_unique_action)\n- Evaluation policy function generating logit value of each action in action space.\n-\ncontext: array-like, shape (n_rounds, dim_context)\nContext vectors characterizing each round (such as user information).\n+ evaluation_policy_logit: array-like, shape (n_rounds, n_unique_action)\n+ Evaluation policy function generating logit value of each action in action space.\n+\nis_factorizable: bool\nA boolean parameter whether to use factorizable evaluation policy (which choose slot actions independently) or not.\nWhen `n_unique_action` and `len_list` are large, this parameter should be set to True because of the computational time.\n\"\"\"\nif (\n- not isinstance(evaluation_policy_logit, np.ndarray)\n- or evaluation_policy_logit.ndim != 2\n+ not isinstance(evaluation_policy_logit_, np.ndarray)\n+ or evaluation_policy_logit_.ndim != 2\n):\nraise ValueError(\"evaluation_policy_logit must be 2-dimensional ndarray\")\n- if evaluation_policy_logit.shape[1] != self.n_unique_action:\n+ if evaluation_policy_logit_.shape[1] != self.n_unique_action:\nraise ValueError(\n\"the size of axis 1 of evaluation_policy_logit must be the same as n_unique_action\"\n)\n@@ -802,16 +802,22 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nraise ValueError(\n\"the size of axis 1 of context must be the same as dim_context\"\n)\n- if evaluation_policy_logit.shape[0] != context.shape[0]:\n+ if evaluation_policy_logit_.shape[0] != context.shape[0]:\nraise ValueError(\n\"the length of evaluation_policy_logit and context must be same\"\n)\n+ if is_factorizable:\n+ enumerated_slate_actions = [\n+ _\n+ for _ in product(np.arange(self.n_unique_action), repeat=self.len_list)\n+ ]\n+ else:\nenumerated_slate_actions = [\n_ for _ in permutations(np.arange(self.n_unique_action), self.len_list)\n]\nn_slate_actions = len(enumerated_slate_actions)\n- n_rounds = len(evaluation_policy_logit)\n+ n_rounds = len(evaluation_policy_logit_)\npolicy_value = 0\nfor i in tqdm(\n@@ -821,7 +827,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n):\n# calculate pscore for each combinatorial set of items (i.e., slate actions)\npscores = []\n- factorizable_pscore = softmax(evaluation_policy_logit[i : i + 1])[0]\n+ factorizable_pscore = softmax(evaluation_policy_logit_[i : i + 1])[0]\nfor action_list in enumerated_slate_actions:\nif is_factorizable:\npscores.append(\n@@ -831,7 +837,7 @@ class 
SyntheticSlateBanditDataset(BaseBanditDataset):\npscores.append(\nself._calc_pscore_given_action_list(\naction_list=action_list,\n- policy_logit_i_=evaluation_policy_logit[i : i + 1],\n+ policy_logit_i_=evaluation_policy_logit_[i : i + 1],\n)\n)\npscores = np.array(pscores)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix calc_ground_truth_policy_value |
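A small sketch of the consequence for enumeration: with a factorizable policy the candidate slates are Cartesian products (repeats allowed), otherwise they are ordered permutations. The sizes below are a toy example.

from itertools import permutations, product

n_unique_action, len_list = 4, 2
without_replacement = list(permutations(range(n_unique_action), len_list))
with_replacement = list(product(range(n_unique_action), repeat=len_list))
print(len(without_replacement))  # 4 * 3 = 12 ordered slates
print(len(with_replacement))     # 4 ** 2 = 16 slates, repeats allowed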
641,014 | 29.05.2021 08:52:36 | -32,400 | 60c31523957c35b12c9e5a5b5d3697f85661bc3c | make calc_pscore faster | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -321,30 +321,44 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_interaction_weight_matrix[position_, position_] = 1\nreturn action_interaction_weight_matrix\n- def _calc_pscore_given_action_list(\n- self, action_list: List[int], policy_logit_i_: np.ndarray\n- ) -> float:\n- \"\"\"Calculate the propensity score given combinatorial set of actions.\n+ def _calc_pscore_given_policy_logit(\n+ self, all_slate_actions: np.ndarray, policy_logit_i_: np.ndarray\n+ ) -> np.ndarray:\n+ \"\"\"Calculate the propensity score of each of the possible slate actions given policy_logit.\nParameters\n------------\n- action_list: List[int], len=len_list\n- List of combinatorial set of slate actions.\n+ all_slate_actions: array-like, (n_action, len_list)\n+ All possible slate actions.\npolicy_logit_i_: array-like, (n_unique_action, )\nLogit values given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n+ Returns\n+ ------------\n+ pscores: array-like, (n_action, )\n+ Propensity scores of all the possible slate actions given policy_logit.\n+\n\"\"\"\n- unique_action_set = np.arange(self.n_unique_action)\n- pscore_ = 1.0\n- for action in action_list:\n- score_ = softmax(policy_logit_i_[:, unique_action_set])[0]\n- action_index = np.where(unique_action_set == action)[0][0]\n- pscore_ *= score_[action_index]\n- unique_action_set = np.delete(\n- unique_action_set, unique_action_set == action\n+ n_actions = len(all_slate_actions)\n+ unique_action_set_2d = np.tile(np.arange(self.n_unique_action), (n_actions, 1))\n+ pscores = np.ones(n_actions)\n+ for position_ in np.arange(self.len_list):\n+ action_index = np.where(\n+ unique_action_set_2d == all_slate_actions[:, position_][:, np.newaxis]\n+ )[1]\n+ pscores *= softmax(policy_logit_i_[unique_action_set_2d])[\n+ np.arange(n_actions), action_index\n+ ]\n+ # delete actions\n+ if position_ != self.len_list:\n+ mask = np.ones((n_actions, self.n_unique_action - position_))\n+ mask[np.arange(n_actions), action_index] = 0\n+ unique_action_set_2d = unique_action_set_2d[mask.astype(bool)].reshape(\n+ (-1, self.n_unique_action - position_ - 1)\n)\n- return pscore_\n+\n+ return pscores\ndef obtain_pscore_given_evaluation_policy_logit(\nself,\n@@ -386,6 +400,14 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore = np.zeros(n_rounds * self.len_list)\nif return_pscore_item_position:\npscore_item_position = np.zeros(n_rounds * self.len_list)\n+ if not self.is_factorizable:\n+ enumerated_slate_actions = [\n+ _\n+ for _ in permutations(\n+ np.arange(self.n_unique_action), self.len_list\n+ )\n+ ]\n+ enumerated_slate_actions = np.array(enumerated_slate_actions)\nelse:\npscore_item_position = None\nfor i in tqdm(\n@@ -417,18 +439,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nelif self.is_factorizable:\npscore_item_position_i_l = score_[action_index_]\nelse:\n- pscore_item_position_i_l = 0.0\n- for action_list in permutations(\n- np.arange(self.n_unique_action), self.len_list\n- ):\n- if action_ != action_list[position_]:\n- continue\n- pscore_item_position_i_l += (\n- self._calc_pscore_given_action_list(\n- action_list=action_list,\n- policy_logit_i_=evaluation_policy_logit_[i : i + 1],\n- )\n+ pscores = self._calc_pscore_given_policy_logit(\n+ all_slate_actions=enumerated_slate_actions,\n+ policy_logit_i_=evaluation_policy_logit_[i],\n)\n+ pscore_item_position_i_l = pscores[\n+ enumerated_slate_actions[:, position_] == action_\n+ ].sum()\npscore_item_position[\ni * self.len_list 
+ position_\n] = pscore_item_position_i_l\n@@ -483,6 +500,14 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore = np.zeros(n_rounds * self.len_list)\nif return_pscore_item_position:\npscore_item_position = np.zeros(n_rounds * self.len_list)\n+ if not self.is_factorizable:\n+ enumerated_slate_actions = [\n+ _\n+ for _ in permutations(\n+ np.arange(self.n_unique_action), self.len_list\n+ )\n+ ]\n+ enumerated_slate_actions = np.array(enumerated_slate_actions)\nelse:\npscore_item_position = None\nfor i in tqdm(\n@@ -504,7 +529,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n# calculate joint pscore\npscore_i *= score_[sampled_action_index]\npscore_cascade[i * self.len_list + position_] = pscore_i\n- # update the pscore given the remaining itemss for nonfactorizable behavior policy\n+ # update the pscore given the remaining items for nonfactorizable behavior policy\nif not self.is_factorizable and position_ != self.len_list - 1:\nunique_action_set = np.delete(\nunique_action_set, unique_action_set == sampled_action\n@@ -521,18 +546,13 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nelif position_ == 0:\npscore_item_position_i_l = pscore_i\nelse:\n- pscore_item_position_i_l = 0.0\n- for action_list in permutations(\n- np.arange(self.n_unique_action), self.len_list\n- ):\n- if sampled_action != action_list[position_]:\n- continue\n- pscore_item_position_i_l += (\n- self._calc_pscore_given_action_list(\n- action_list=action_list,\n- policy_logit_i_=behavior_policy_logit_[i : i + 1],\n- )\n+ pscores = self._calc_pscore_given_policy_logit(\n+ all_slate_actions=enumerated_slate_actions,\n+ policy_logit_i_=behavior_policy_logit_[i],\n)\n+ pscore_item_position_i_l = pscores[\n+ enumerated_slate_actions[:, position_] == sampled_action\n+ ].sum()\npscore_item_position[\ni * self.len_list + position_\n] = pscore_item_position_i_l\n@@ -822,12 +842,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)[-1]\n)\nelse:\n- for action_list in enumerated_slate_actions:\n- pscores.append(\n- self._calc_pscore_given_action_list(\n- action_list=action_list,\n- policy_logit_i_=evaluation_policy_logit_[i : i + 1],\n- )\n+ pscores = self._calc_pscore_given_policy_logit(\n+ all_slate_actions=np.array(enumerated_slate_actions),\n+ policy_logit_i_=evaluation_policy_logit_[i],\n)\npscores = np.array(pscores)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | make calc_pscore faster |
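A sketch of one way to vectorize the per-slate propensity computation with a masked softmax over all candidate slates at once; the function name and toy logits are assumptions, not the library's API.

from itertools import permutations

import numpy as np


def pscores_for_all_slates(slates: np.ndarray, logit: np.ndarray) -> np.ndarray:
    # slates: (n_slates, len_list) item ids without repeats; logit: (n_unique_action,)
    n_slates, len_list = slates.shape
    available = np.ones((n_slates, len(logit)), dtype=bool)
    pscores = np.ones(n_slates)
    rows = np.arange(n_slates)
    for pos in range(len_list):
        masked = np.where(available, logit, -np.inf)        # exclude already-chosen items
        masked = masked - masked.max(axis=1, keepdims=True)
        probs = np.exp(masked)
        probs /= probs.sum(axis=1, keepdims=True)
        chosen = slates[:, pos]
        pscores *= probs[rows, chosen]
        available[rows, chosen] = False
    return pscores


toy_logit = np.array([0.1, 1.2, -0.4, 0.8])                 # assumed policy logits
slates = np.array(list(permutations(range(4), 2)))
p = pscores_for_all_slates(slates, toy_logit)
print(p.sum())  # probabilities over all ordered slates sum to 1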
641,011 | 29.05.2021 16:25:42 | -32,400 | c95125a6f50560c701bd3c509964b57773631b58 | batch processing for calc_ground_truth_policy_value | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -831,7 +831,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscores.append(\nsoftmax(evaluation_policy_logit_)[:, action_list].prod(1)\n)\n- pscores = np.array(pscores).T.flatten()\n+ pscores = np.array(pscores).T\nelse:\nfor i in tqdm(\nnp.arange(n_rounds),\n@@ -844,9 +844,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npolicy_logit_i_=evaluation_policy_logit_[i],\n)\n)\n- pscores = np.array(pscores).flatten()\n+ pscores = np.array(pscores)\n- print(\"calculating expected rewards..\")\n# calculate expected slate-level reward for each combinatorial set of items (i.e., slate actions)\nif self.base_reward_function is None:\nexpected_slot_reward = self.sample_contextfree_expected_reward(\n@@ -865,9 +864,28 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nfor position_ in np.arange(self.len_list)\n]\n).T\n+ policy_value = (pscores * expected_slate_rewards.sum(axis=1)).sum()\nelse:\n- expected_slate_rewards = self.reward_function(\n- context=context,\n+ n_rounds = len(context)\n+ len_enumerated = len(enumerated_slate_actions)\n+ n_batch = (n_rounds * len_enumerated * self.len_list - 1) // 10 ** 8 + 1\n+ batch_size = ((n_rounds - 1) // n_batch) + 1\n+\n+ policy_value = 0.0\n+ for batch_idx in tqdm(\n+ np.arange(n_batch),\n+ desc=f\"[calc_ground_truth_policy_value (expected reward), batch_size={batch_size}]\",\n+ total=n_batch,\n+ ):\n+ context_ = context[\n+ batch_idx * batch_size : (batch_idx + 1) * batch_size\n+ ]\n+ pscores_ = pscores[\n+ batch_idx * batch_size : (batch_idx + 1) * batch_size\n+ ]\n+\n+ expected_slate_rewards_ = self.reward_function(\n+ context=context_,\naction_context=self.action_context,\naction=np.array(enumerated_slate_actions).flatten(),\naction_interaction_weight_matrix=self.action_interaction_weight_matrix,\n@@ -878,27 +896,36 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nis_enumerated=True,\nrandom_state=self.random_state,\n)\n- expected_slate_rewards = np.clip(\n- expected_slate_rewards, 0, None\n+\n+ expected_slate_rewards_ = np.clip(\n+ expected_slate_rewards_, 0, None\n) # (n_slate_actions, self.len_list)\n# click models based on expected reward\n- expected_slate_rewards *= self.exam_weight\n+ expected_slate_rewards_ *= self.exam_weight\nif self.reward_type == \"binary\":\n- discount_factors = np.ones(expected_slate_rewards.shape[0])\n- previous_slot_expected_reward = np.zeros(expected_slate_rewards.shape[0])\n+ discount_factors = np.ones(expected_slate_rewards_.shape[0])\n+ previous_slot_expected_reward = np.zeros(\n+ expected_slate_rewards_.shape[0]\n+ )\nfor position_ in np.arange(self.len_list):\n- discount_factors *= previous_slot_expected_reward * self.attractiveness[\n- position_\n- ] + (1 - previous_slot_expected_reward)\n- expected_slate_rewards[:, position_] = (\n- discount_factors * expected_slate_rewards[:, position_]\n+ discount_factors *= (\n+ previous_slot_expected_reward\n+ * self.attractiveness[position_]\n+ + (1 - previous_slot_expected_reward)\n)\n- previous_slot_expected_reward = expected_slate_rewards[:, position_]\n+ expected_slate_rewards_[:, position_] = (\n+ discount_factors * expected_slate_rewards_[:, position_]\n+ )\n+ previous_slot_expected_reward = expected_slate_rewards_[\n+ :, position_\n+ ]\n- policy_value = (pscores * expected_slate_rewards.sum(axis=1)).sum() / n_rounds\n+ policy_value += (\n+ pscores_.flatten() * expected_slate_rewards_.sum(axis=1)\n+ ).sum()\n- return policy_value\n+ return policy_value / n_rounds\ndef generate_evaluation_policy_pscore(\nself,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | batch processing for calc_ground_truth_policy_value |
641,011 | 30.05.2021 07:12:56 | -32,400 | 586f1d467c22765db9371d3204f7c46fecd7f334 | faster calc_ground_truth_policy_value | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -300,7 +300,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_interaction_weight_matrix[:, position_] = -self.decay_function(\nnp.abs(np.arange(len_list) - position_)\n)\n- action_interaction_weight_matrix[position_, position_] = 1\n+ action_interaction_weight_matrix[position_, position_] = 0\nreturn action_interaction_weight_matrix\ndef obtain_cascade_decay_action_interaction_weight_matrix(\n@@ -314,9 +314,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nnp.abs(np.arange(len_list) - position_)\n)\nfor position_2 in np.arange(len_list):\n- if position_ < position_2:\n+ if position_ <= position_2:\naction_interaction_weight_matrix[position_2, position_] = 0\n- action_interaction_weight_matrix[position_, position_] = 1\nreturn action_interaction_weight_matrix\ndef _calc_pscore_given_policy_logit(\n@@ -1303,8 +1302,10 @@ def action_interaction_reward_function(\nnp.arange(len(action_2d)) // n_enumerated_slate_actions,\naction_2d[:, position_],\n]\n+ if reward_structure == \"independent\":\n+ continue\n+ elif is_additive:\nfor position2_ in np.arange(len_list)[::-1]:\n- if is_additive:\nif is_cascade:\nif position_ >= position2_:\nbreak\n@@ -1314,6 +1315,12 @@ def action_interaction_reward_function(\naction_2d[:, position_], action_2d[:, position2_]\n]\nelse:\n+ for position2_ in np.arange(len_list)[::-1]:\n+ if is_cascade:\n+ if position_ >= position2_:\n+ break\n+ elif position_ == position2_:\n+ continue\nexpected_reward_ = expected_reward[\nnp.arange(len(action_2d)) // n_enumerated_slate_actions,\naction_2d[:, position2_],\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | faster calc_ground_truth_policy_value |
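A sketch of a decay-type position-interaction matrix built from the exponential and inverse decay functions appearing earlier in these diffs, with the diagonal zeroed so a slot does not interact with itself; the sign convention and helper names here are assumptions.

import numpy as np


def exponential_decay(distance: np.ndarray) -> np.ndarray:
    return np.exp(-distance)


def inverse_decay(distance: np.ndarray) -> np.ndarray:
    return 1.0 / (distance + 1.0)


def decay_weight_matrix(len_list: int, decay_function=exponential_decay) -> np.ndarray:
    positions = np.arange(len_list)
    distance = np.abs(positions[:, None] - positions[None, :])
    weights = -decay_function(distance)  # off-diagonal interactions dampen the reward
    np.fill_diagonal(weights, 0.0)       # no self-interaction
    return weights


print(decay_weight_matrix(3))
print(decay_weight_matrix(3, decay_function=inverse_decay))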
641,005 | 30.05.2021 20:11:51 | -32,400 | 30dae82c3e493ebb625138b55eeff95798c2daa3 | change x labels | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "@@ -608,10 +608,6 @@ class OffPolicyEvaluation:\nestimated_round_rewards_df = DataFrame(\nestimated_round_rewards_dict[estimator_name]\n)\n- estimated_round_rewards_df.rename(\n- columns={key: key.upper() for key in policy_name_list},\n- inplace=True,\n- )\nif is_relative:\nestimated_round_rewards_df /= self.bandit_feedback[\"reward\"].mean()\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | change x labels |
641,011 | 31.05.2021 15:13:17 | -32,400 | 9463fb95680021319f757f3b2652d2efa05e345b | bug fix on batch processing | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -871,8 +871,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nelse:\nn_batch = (\nn_rounds * n_enumerated_slate_actions * self.len_list - 1\n- ) // 10 ** 8 + 1\n- batch_size = ((n_rounds - 1) // n_batch) + 1\n+ ) // 10 ** 7 + 1\n+ batch_size = (n_rounds - 1) // n_batch + 1\n+ n_batch = (n_rounds - 1) // batch_size + 1\npolicy_value = 0.0\nfor batch_idx in tqdm(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | bug fix on batch processing |
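A sketch of the batching arithmetic: the number of batches is derived from a memory budget with ceil divisions and then recomputed from the batch size so the last batch is never empty; the concrete sizes and the 10**7 budget below are illustrative.

import numpy as np

n_rounds, n_enumerated_slate_actions, len_list = 1000, 5040, 3
budget = 10 ** 7  # rough cap on (round, slate, position) entries per batch

n_batch = (n_rounds * n_enumerated_slate_actions * len_list - 1) // budget + 1  # ceil division
batch_size = (n_rounds - 1) // n_batch + 1
n_batch = (n_rounds - 1) // batch_size + 1  # recompute so the trailing batch cannot be empty

for batch_idx in range(n_batch):
    rounds_in_batch = np.arange(
        batch_idx * batch_size, min((batch_idx + 1) * batch_size, n_rounds)
    )
    assert rounds_in_batch.size > 0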
641,006 | 06.06.2021 19:53:13 | -32,400 | ddef53dcb93f7c86540e4907ce6ceea5f0566867 | add pscore mock test | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -2101,3 +2101,184 @@ def test_obtain_pscore_given_evaluation_policy_logit_value_check(\nelse bandit_feedback[\"pscore_item_position\"]\n== evaluation_policy_pscore_item_position\n)\n+\n+\n+# n_unique_action, len_list, all_slate_actions, policy_logit_i_, true_pscores, description\n+valid_input_of_calc_pscore_given_policy_logit = [\n+ (\n+ 5,\n+ 3,\n+ np.array([[0, 1, 2], [3, 1, 0]]),\n+ np.arange(5),\n+ np.array(\n+ [\n+ [\n+ np.exp(0) / np.exp([0, 1, 2, 3, 4]).sum(),\n+ np.exp(1) / np.exp([1, 2, 3, 4]).sum(),\n+ np.exp(2) / np.exp([2, 3, 4]).sum(),\n+ ],\n+ [\n+ np.exp(3) / np.exp([0, 1, 2, 3, 4]).sum(),\n+ np.exp(1) / np.exp([0, 1, 2, 4]).sum(),\n+ np.exp(0) / np.exp([0, 2, 4]).sum(),\n+ ],\n+ ]\n+ ).prod(axis=1),\n+ \"calc pscores of several slate actions\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_unique_action, len_list, all_slate_actions, policy_logit_i_, true_pscores, description\",\n+ valid_input_of_calc_pscore_given_policy_logit,\n+)\n+def test_calc_pscore_given_policy_logit_using_valid_input_data(\n+ n_unique_action,\n+ len_list,\n+ all_slate_actions,\n+ policy_logit_i_,\n+ true_pscores,\n+ description,\n+) -> None:\n+ # set parameters\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ base_reward_function=logistic_reward_function,\n+ )\n+ pscores = dataset._calc_pscore_given_policy_logit(\n+ all_slate_actions, policy_logit_i_\n+ )\n+ assert np.allclose(true_pscores, pscores)\n+\n+\n+# n_unique_action, len_list, evaluation_policy_logit_, action, true_pscores, true_pscores_cascade, true_pscores_item_position,description\n+mock_input_of_obtain_pscore_given_evaluation_policy_logit = [\n+ (\n+ 3,\n+ 2,\n+ np.array([[0, 1, 2], [2, 1, 0]]),\n+ np.array([2, 1, 2, 0]),\n+ np.repeat(\n+ np.array(\n+ [\n+ [\n+ np.exp(2) / np.exp([0, 1, 2]).sum(),\n+ np.exp(1) / np.exp([0, 1]).sum(),\n+ ],\n+ [\n+ np.exp(0) / np.exp([0, 1, 2]).sum(),\n+ np.exp(2) / np.exp([1, 2]).sum(),\n+ ],\n+ ]\n+ ).prod(axis=1),\n+ 2,\n+ ),\n+ np.array(\n+ [\n+ [\n+ np.exp(2) / np.exp([0, 1, 2]).sum(),\n+ np.exp(1) / np.exp([0, 1]).sum(),\n+ ],\n+ [\n+ np.exp(0) / np.exp([0, 1, 2]).sum(),\n+ np.exp(2) / np.exp([1, 2]).sum(),\n+ ],\n+ ]\n+ )\n+ .cumprod(axis=1)\n+ .flatten(),\n+ np.array(\n+ [\n+ [\n+ np.exp(2)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(1)\n+ / np.exp([0, 1]).sum(),\n+ np.exp(2)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(0)\n+ / np.exp([0, 1]).sum(),\n+ ],\n+ [\n+ np.exp(2)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(1)\n+ / np.exp([0, 1]).sum(),\n+ np.exp(0)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(1)\n+ / np.exp([1, 2]).sum(),\n+ ],\n+ [\n+ np.exp(0)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(1)\n+ / np.exp([1, 2]).sum(),\n+ np.exp(0)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(2)\n+ / np.exp([1, 2]).sum(),\n+ ],\n+ [\n+ np.exp(1)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(2)\n+ / np.exp([0, 2]).sum(),\n+ np.exp(0)\n+ / np.exp([0, 1, 2]).sum()\n+ * np.exp(2)\n+ / np.exp([1, 2]).sum(),\n+ ],\n+ ]\n+ ).sum(axis=1),\n+ \"calc three pscores using mock data\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_unique_action, len_list, evaluation_policy_logit_, action, true_pscores, true_pscores_cascade, true_pscores_item_position,description\",\n+ mock_input_of_obtain_pscore_given_evaluation_policy_logit,\n+)\n+def test_obtain_pscore_given_evaluation_policy_logit_using_mock_input_data(\n+ n_unique_action,\n+ 
len_list,\n+ evaluation_policy_logit_,\n+ action,\n+ true_pscores,\n+ true_pscores_cascade,\n+ true_pscores_item_position,\n+ description,\n+) -> None:\n+ # set parameters\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ random_state=random_state,\n+ base_reward_function=logistic_reward_function,\n+ )\n+ (\n+ evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position,\n+ evaluation_policy_pscore_cascade,\n+ ) = dataset.obtain_pscore_given_evaluation_policy_logit(\n+ action, evaluation_policy_logit_, return_pscore_item_position=True\n+ )\n+ assert np.allclose(true_pscores, evaluation_policy_pscore)\n+ assert np.allclose(true_pscores_cascade, evaluation_policy_pscore_cascade)\n+ assert np.allclose(\n+ true_pscores_item_position, evaluation_policy_pscore_item_position\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add pscore mock test |
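The expected pscores in the test above are written out by hand; the rule they encode is a Plackett–Luce style sequential softmax, where each position's probability is a softmax over the actions not yet placed in the slate. A minimal sketch of that rule in plain NumPy (toy helper, not part of the library API):

```python
import numpy as np

def slate_pscore(slate, logit):
    """Probability of drawing `slate` position by position from `logit` (toy sketch)."""
    remaining = list(range(len(logit)))
    pscore = 1.0
    for a in slate:
        scores = np.exp(logit[remaining])
        pscore *= scores[remaining.index(a)] / scores.sum()
        remaining.remove(a)  # sample without replacement
    return pscore

# reproduces the hand-computed value for slate [0, 1, 2] with logits 0..4
print(slate_pscore([0, 1, 2], np.arange(5)))
```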
641,006 | 06.06.2021 20:01:42 | -32,400 | 53828f6a1c7f495ace812b94061c7e353e79cdc0 | fix bug of update params (reward_counts should not be the mean, but the count) | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/contextfree.py",
"new_path": "obp/policy/contextfree.py",
"diff": "@@ -89,8 +89,7 @@ class EpsilonGreedy(BaseContextFreePolicy):\n\"\"\"\nself.n_trial += 1\nself.action_counts_temp[action] += 1\n- n, old_reward = self.action_counts_temp[action], self.reward_counts_temp[action]\n- self.reward_counts_temp[action] = (old_reward * (n - 1) / n) + (reward / n)\n+ self.reward_counts_temp[action] += reward\nif self.n_trial % self.batch_size == 0:\nself.action_counts = np.copy(self.action_counts_temp)\nself.reward_counts = np.copy(self.reward_counts_temp)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_contextfree.py",
"new_path": "tests/policy/test_contextfree.py",
"diff": "@@ -86,9 +86,7 @@ def test_egreedy_update_params():\nreward = 1.0\npolicy.update_params(action, reward)\nassert np.array_equal(policy.action_counts, np.array([5, 3]))\n- # in epsilon greedy, reward_counts is defined as the mean of observed rewards for each action\n- next_reward = (2.0 * (5 - 1) / 5) + (reward / 5)\n- assert np.allclose(policy.reward_counts, np.array([next_reward, 0.0]))\n+ assert np.allclose(policy.reward_counts, np.array([2.0 + reward, 0.0]))\ndef test_random_compute_batch_action_dist():\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix bug of update params (reward_counts should not be the mean, but the count |
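After this fix, `reward_counts` holds the cumulative reward per action rather than a running mean, which is what the updated test asserts. A small sketch of the corrected bookkeeping with toy arrays (the policy class itself accumulates into `*_temp` buffers and copies them once per batch):

```python
import numpy as np

action_counts = np.array([4, 3])
reward_counts = np.array([2.0, 0.0])

action, reward = 0, 1.0
action_counts[action] += 1
reward_counts[action] += reward  # cumulative sum, no running-mean update

# the empirical mean reward per action is recovered on demand
mean_reward = reward_counts / np.maximum(action_counts, 1)
print(action_counts, reward_counts, mean_reward)  # [5 3] [3. 0.] [0.6 0. ]
```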
641,006 | 06.06.2021 20:08:57 | -32,400 | 3722d00a8310b884f67212b81915fa58dd517d88 | delete action/pscore/reward/estimated_rewards_by_model from check_ope_inputs | [
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -301,8 +301,6 @@ def check_ope_inputs(\n# estimated_rewards_by_reg_model\nif estimated_rewards_by_reg_model is not None:\n- if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n- raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\nif estimated_rewards_by_reg_model.shape != action_dist.shape:\nraise ValueError(\n\"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\"\n@@ -310,12 +308,8 @@ def check_ope_inputs(\n# action, reward\nif action is not None or reward is not None:\n- if not isinstance(action, np.ndarray):\n- raise ValueError(\"action must be ndarray\")\nif action.ndim != 1:\nraise ValueError(\"action must be 1-dimensional\")\n- if not isinstance(reward, np.ndarray):\n- raise ValueError(\"reward must be ndarray\")\nif reward.ndim != 1:\nraise ValueError(\"reward must be 1-dimensional\")\nif not (action.shape[0] == reward.shape[0]):\n@@ -329,8 +323,6 @@ def check_ope_inputs(\n# pscore\nif pscore is not None:\n- if not isinstance(pscore, np.ndarray):\n- raise ValueError(\"pscore must be ndarray\")\nif pscore.ndim != 1:\nraise ValueError(\"pscore must be 1-dimensional\")\nif not (action.shape[0] == reward.shape[0] == pscore.shape[0]):\n@@ -671,8 +663,6 @@ def check_ope_inputs_tensor(\n# estimated_rewards_by_reg_model\nif estimated_rewards_by_reg_model is not None:\n- if not isinstance(estimated_rewards_by_reg_model, torch.Tensor):\n- raise ValueError(\"estimated_rewards_by_reg_model must be Tensor\")\nif estimated_rewards_by_reg_model.shape != action_dist.shape:\nraise ValueError(\n\"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\"\n@@ -680,12 +670,8 @@ def check_ope_inputs_tensor(\n# action, reward\nif action is not None or reward is not None:\n- if not isinstance(action, torch.Tensor):\n- raise ValueError(\"action must be Tensor\")\nif action.ndim != 1:\nraise ValueError(\"action must be 1-dimensional\")\n- if not isinstance(reward, torch.Tensor):\n- raise ValueError(\"reward must be Tensor\")\nif reward.ndim != 1:\nraise ValueError(\"reward must be 1-dimensional\")\nif not (action.shape[0] == reward.shape[0]):\n@@ -699,8 +685,6 @@ def check_ope_inputs_tensor(\n# pscore\nif pscore is not None:\n- if not isinstance(pscore, torch.Tensor):\n- raise ValueError(\"pscore must be Tensor\")\nif pscore.ndim != 1:\nraise ValueError(\"pscore must be 1-dimensional\")\nif not (action.shape[0] == reward.shape[0] == pscore.shape[0]):\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | delete action/pscore/reward/estimated_rewards_by_model from check_ope_inputs |
641,010 | 21.06.2021 23:36:15 | -28,800 | c773dfb8b11ef26e713822ef38cd743e1a02d8fd | docs: add buttons to open ipynb files in Colab | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -78,7 +78,7 @@ Open Bandit Pipeline consists of the following main modules.\n- [**dataset module**](./obp/dataset/): This module provides a data loader for Open Bandit Dataset and a flexible interface for handling logged bandit feedback. It also provides tools to generate synthetic bandit data and transform multi-class classification data to bandit data.\n- [**policy module**](./obp/policy/): This module provides interfaces for implementing new online and offline bandit policies. It also implements several standard policy learning methods.\n-- [**simulator module**](./obp/simulator/): This module provides functions for conducting offline bandit simulation. This module is necessary only when we want to implement the ReplayMethod to evaluate the performance of online or adaptive bandit policies with logged bandit data. Please refer to [examples/quickstart/online.ipynb](./examples/quickstart/online.ipynb) for the quickstart guide of implementing OPE of online bandit algorithms.\n+- [**simulator module**](./obp/simulator/): This module provides functions for conducting offline bandit simulation. This module is necessary only when we want to implement the ReplayMethod to evaluate the performance of online or adaptive bandit policies with logged bandit data. Please refer to [examples/quickstart/online.ipynb](./examples/quickstart/online.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/online.ipynb) for the quickstart guide of implementing OPE of online bandit algorithms.\n- [**ope module**](./obp/ope/): This module provides interfaces for implementing OPE estimators. It also implements several standard and advanced OPE estimators.\n### Algorithms and OPE Estimators Supported\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/README.md",
"new_path": "examples/quickstart/README.md",
"diff": "This page contains a list of quickstart notebooks written with the Open Bandit Pipeline.\n-- [`obd.ipynb`](./obd.ipynb): a quickstart guide of the Open Bandit Dataset and Pipeline.\n-- [`synthetic.ipynb`](./synthetic.ipynb): a quickstart guide to implement the standard off-policy learning, off-policy evaluation (OPE), and the evaluation of OPE procedures with the Open Bandit Pipeline.\n-- [`multiclass.ipynb`](./multiclass.ipynb): a quickstart guide to handle multi-class classification data as logged bandit feedback data for the standard off-policy learning, off-policy evaluation (OPE), and the evaluation of OPE procedures with the Open Bandit Pipeline.\n-- [`online.ipynb`](./online.ipynb): a quickstart guide to implement off-policy evaluation (OPE) and the evaluation of OPE procedures for online bandit algorithms with the Open Bandit Pipeline.\n-- [`opl.ipynb`](./opl.ipynb): a quickstart guide to implement off-policy learners and the evaluation of off-policy learners with the Open Bandit Pipeline.\n-- [`synthetic_slate.ipynb`](./synthetic_slate.ipynb): a quickstart guide to implement off-policy evaluation (OPE) and the evaluation of OPE procedures for the slate recommendation setting with the Open Bandit Pipeline.\n+- [`obd.ipynb`](./obd.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/obd.ipynb): a quickstart guide of the Open Bandit Dataset and Pipeline.\n+- [`synthetic.ipynb`](./synthetic.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/synthetic.ipynb): a quickstart guide to implement the standard off-policy learning, off-policy evaluation (OPE), and the evaluation of OPE procedures with the Open Bandit Pipeline.\n+- [`multiclass.ipynb`](./multiclass.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/multiclass.ipynb): a quickstart guide to handle multi-class classification data as logged bandit feedback data for the standard off-policy learning, off-policy evaluation (OPE), and the evaluation of OPE procedures with the Open Bandit Pipeline.\n+- [`online.ipynb`](./online.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/online.ipynb): a quickstart guide to implement off-policy evaluation (OPE) and the evaluation of OPE procedures for online bandit algorithms with the Open Bandit Pipeline.\n+- [`opl.ipynb`](./opl.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/opl.ipynb): a quickstart guide to implement off-policy learners and the evaluation of off-policy learners with the Open Bandit Pipeline.\n+- [`synthetic_slate.ipynb`](./synthetic_slate.ipynb) [](https://colab.research.google.com/github/st-tech/zr-obp/blob/master/examples/quickstart/synthetic_slate.ipynb): a quickstart guide to implement off-policy evaluation (OPE) and the evaluation of OPE procedures for the slate recommendation setting with the Open Bandit Pipeline.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | docs: add buttons to open ipynb files in Colab |
641,006 | 27.06.2021 22:37:53 | -32,400 | 58332df77a41600f0e74fe616b44cfb2aa2b6462 | add faster method to calculate pscore item position | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -11,6 +11,7 @@ from scipy.stats import truncnorm\nfrom scipy.special import perm\nfrom sklearn.utils import check_random_state, check_scalar\nfrom tqdm import tqdm\n+from profilehooks import profile\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n@@ -359,6 +360,46 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn pscores\n+ def _calc_pscore_given_policy_value(\n+ self, all_slate_actions: np.ndarray, policy_value_i_: np.ndarray\n+ ) -> np.ndarray:\n+ \"\"\"Calculate the propensity score of each of the possible slate actions given policy_logit.\n+\n+ Parameters\n+ ------------\n+ all_slate_actions: array-like, (n_action, len_list)\n+ All possible slate actions.\n+\n+ policy_value_i_: array-like, (n_unique_action, )\n+ Policy values given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n+\n+ Returns\n+ ------------\n+ pscores: array-like, (n_action, )\n+ Propensity scores of all the possible slate actions given policy_logit.\n+\n+ \"\"\"\n+ n_actions = len(all_slate_actions)\n+ unique_action_set_2d = np.tile(np.arange(self.n_unique_action), (n_actions, 1))\n+ pscores = np.ones(n_actions)\n+ for position_ in np.arange(self.len_list):\n+ action_index = np.where(\n+ unique_action_set_2d == all_slate_actions[:, position_][:, np.newaxis]\n+ )[1]\n+ score_ = policy_value_i_[unique_action_set_2d]\n+ pscores *= np.divide(score_, score_.sum(axis=1, keepdims=True))[\n+ np.arange(n_actions), action_index\n+ ]\n+ # delete actions\n+ if position_ + 1 != self.len_list:\n+ mask = np.ones((n_actions, self.n_unique_action - position_))\n+ mask[np.arange(n_actions), action_index] = 0\n+ unique_action_set_2d = unique_action_set_2d[mask.astype(bool)].reshape(\n+ (-1, self.n_unique_action - position_ - 1)\n+ )\n+\n+ return pscores\n+\ndef obtain_pscore_given_evaluation_policy_logit(\nself,\naction: np.ndarray,\n@@ -455,11 +496,14 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn pscore, pscore_item_position, pscore_cascade\n+ # TODO: `@profile` will be removed before merging.\n+ @profile\ndef sample_action_and_obtain_pscore(\nself,\nbehavior_policy_logit_: np.ndarray,\nn_rounds: int,\nreturn_pscore_item_position: bool = True,\n+ clip_logit_value: Optional[float] = None,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Optional[np.ndarray]]:\n\"\"\"Sample action and obtain the three variants of the propensity scores.\n@@ -475,6 +519,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen n_actions and len_list are large, giving True to this parameter may lead to a large computational time.\n+ clip_logit_value: Optional[float], default=None\n+ A float parameter to clip logit value (<= `700.`).\n+ When None is given, we calculate softmax values without clipping to obtain `pscore_item_position`.\n+ When a float value is given, we clip logit values to calculate softmax values to obtain `pscore_item_position`.\n+ When n_actions and len_list are large, giving None to this parameter may lead to a large computational time.\n+\nReturns\n----------\naction: array-like, shape (n_rounds * len_list)\n@@ -509,6 +559,16 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nenumerated_slate_actions = np.array(enumerated_slate_actions)\nelse:\npscore_item_position = None\n+ if return_pscore_item_position and clip_logit_value is not None:\n+ check_scalar(\n+ clip_logit_value,\n+ name=\"clip_logit_value\",\n+ 
target_type=(float),\n+ max_val=700.0,\n+ )\n+ behavior_policy_value_ = np.exp(\n+ np.minimum(behavior_policy_logit_, clip_logit_value)\n+ )\nfor i in tqdm(\nnp.arange(n_rounds),\ndesc=\"[sample_action_and_obtain_pscore]\",\n@@ -544,6 +604,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_item_position_i_l = score_[sampled_action_index]\nelif position_ == 0:\npscore_item_position_i_l = pscore_i\n+ else:\n+ if isinstance(clip_logit_value, float):\n+ pscores = self._calc_pscore_given_policy_value(\n+ all_slate_actions=enumerated_slate_actions,\n+ policy_value_i_=behavior_policy_value_[i],\n+ )\nelse:\npscores = self._calc_pscore_given_policy_logit(\nall_slate_actions=enumerated_slate_actions,\n@@ -632,6 +698,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself,\nn_rounds: int,\nreturn_pscore_item_position: bool = True,\n+ clip_logit_value: Optional[float] = None,\n) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -644,6 +711,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen `n_unique_action` and `len_list` are large, this parameter should be set to False because of the computational time.\n+ clip_softmax_value: Optional[float], default=None\n+ A float parameter to clip logit value.\n+ When None is given, we calculate softmax values without clipping to obtain `pscore_item_position`.\n+ When a float value is given, we clip logit values to calculate softmax values to obtain `pscore_item_position`.\n+ When n_actions and len_list are large, giving None to this parameter may lead to a large computational time.\n+\nReturns\n---------\nbandit_feedback: BanditFeedback\n@@ -683,6 +756,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nbehavior_policy_logit_=behavior_policy_logit_,\nn_rounds=n_rounds,\nreturn_pscore_item_position=return_pscore_item_position,\n+ clip_logit_value=clip_logit_value,\n)\n# sample expected reward factual\nif self.base_reward_function is None:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add faster method to calculate pscore item position |
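`_calc_pscore_given_policy_value` above gains speed by exponentiating the (clipped) logits once and then treating each position's softmax as a ratio of those precomputed values, evaluated for all enumerated slates in a vectorized way. A minimal sketch of the same idea at toy sizes (plain NumPy/itertools, not the library method):

```python
import itertools
import numpy as np

n_unique_action, len_list = 4, 2
value = np.exp(np.array([0.5, -1.0, 2.0, 0.0]))  # exp of (clipped) logits, computed once

slates = np.array(list(itertools.permutations(range(n_unique_action), len_list)))
pscores = np.ones(len(slates))
remaining = np.ones((len(slates), n_unique_action), dtype=bool)
for pos in range(len_list):
    chosen = slates[:, pos]
    pscores *= value[chosen] / (value * remaining).sum(axis=1)
    remaining[np.arange(len(slates)), chosen] = False  # without replacement

print(pscores.sum())  # ~1.0: the pscores over all possible slates form a distribution
```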
641,006 | 27.06.2021 22:38:29 | -32,400 | b0e8c698e99f65fa0a119cac1bcd2cce4e078a0f | add example of obtaining slate bandit feedback | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "examples/synthetic/obtain_slate_bandit_feedback.py",
"diff": "+import argparse\n+\n+from obp.dataset import (\n+ logistic_reward_function,\n+ linear_behavior_policy_logit,\n+ SyntheticSlateBanditDataset,\n+)\n+\n+\n+if __name__ == \"__main__\":\n+ parser = argparse.ArgumentParser(description=\"run slate dataset.\")\n+ parser.add_argument(\n+ \"--n_unique_action\", type=int, default=10, help=\"number of unique actions.\"\n+ )\n+ parser.add_argument(\n+ \"--len_list\", type=int, default=3, help=\"number of item positions.\"\n+ )\n+ parser.add_argument(\"--n_rounds\", type=int, default=100, help=\"number of slates.\")\n+ parser.add_argument(\n+ \"--clip_logit_value\",\n+ type=float,\n+ default=None,\n+ help=\"a float parameter to clip logit value.\",\n+ )\n+ parser.add_argument(\n+ \"--is_factorizable\",\n+ type=bool,\n+ default=False,\n+ help=\"a boolean parameter whether to use factorizable evaluation policy.\",\n+ )\n+ parser.add_argument(\n+ \"--return_pscore_item_position\",\n+ type=bool,\n+ default=True,\n+ help=\"a boolean parameter whether `pscore_item_position` is returned or not\",\n+ )\n+ parser.add_argument(\"--random_state\", type=int, default=12345)\n+ args = parser.parse_args()\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=args.n_unique_action,\n+ dim_context=5,\n+ len_list=args.len_list,\n+ base_reward_function=logistic_reward_function,\n+ behavior_policy_function=linear_behavior_policy_logit,\n+ reward_type=\"binary\",\n+ reward_structure=\"cascade_additive\",\n+ click_model=\"cascade\",\n+ random_state=12345,\n+ is_factorizable=args.is_factorizable,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=args.n_rounds,\n+ return_pscore_item_position=args.return_pscore_item_position,\n+ clip_logit_value=args.clip_logit_value,\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add example of obtaining slate bandit feedback |
641,006 | 27.06.2021 23:26:53 | -32,400 | a4cfb2fdaf46594e49e7033bed8e8d5a35484e94 | add clip_logit_value to obtain_pscore_given_evaluation_policy_logit | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -405,6 +405,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction: np.ndarray,\nevaluation_policy_logit_: np.ndarray,\nreturn_pscore_item_position: bool = True,\n+ clip_logit_value: Optional[float] = None,\n):\n\"\"\"Calculate the propensity score given evaluation policy logit.\n@@ -420,6 +421,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen n_actions and len_list are large, giving True to this parameter may lead to a large computational time.\n+ clip_logit_value: Optional[float], default=None\n+ A float parameter to clip logit value (<= `700.`).\n+ When None is given, we calculate softmax values without clipping to obtain `pscore_item_position`.\n+ When a float value is given, we clip logit values to calculate softmax values to obtain `pscore_item_position`.\n+ When n_actions and len_list are large, giving None to this parameter may lead to a large computational time.\n+\n\"\"\"\nif not isinstance(action, np.ndarray) or action.ndim != 1:\nraise ValueError(\"action must be 1-dimensional ndarray\")\n@@ -450,6 +457,16 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nenumerated_slate_actions = np.array(enumerated_slate_actions)\nelse:\npscore_item_position = None\n+ if return_pscore_item_position and clip_logit_value is not None:\n+ check_scalar(\n+ clip_logit_value,\n+ name=\"clip_logit_value\",\n+ target_type=(float),\n+ max_val=700.0,\n+ )\n+ evaluation_policy_value_ = np.exp(\n+ np.minimum(evaluation_policy_logit_, clip_logit_value)\n+ )\nfor i in tqdm(\nnp.arange(n_rounds),\ndesc=\"[obtain_pscore_given_evaluation_policy_logit]\",\n@@ -478,6 +495,12 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_item_position_i_l = pscore_i\nelif self.is_factorizable:\npscore_item_position_i_l = score_[action_index_]\n+ else:\n+ if isinstance(clip_logit_value, float):\n+ pscores = self._calc_pscore_given_policy_value(\n+ all_slate_actions=enumerated_slate_actions,\n+ policy_value_i_=evaluation_policy_value_[i],\n+ )\nelse:\npscores = self._calc_pscore_given_policy_logit(\nall_slate_actions=enumerated_slate_actions,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -2157,6 +2157,10 @@ def test_calc_pscore_given_policy_logit_using_valid_input_data(\nall_slate_actions, policy_logit_i_\n)\nassert np.allclose(true_pscores, pscores)\n+ pscores = dataset._calc_pscore_given_policy_value(\n+ all_slate_actions, np.exp(policy_logit_i_)\n+ )\n+ assert np.allclose(true_pscores, pscores)\n# n_unique_action, len_list, evaluation_policy_logit_, action, true_pscores, true_pscores_cascade, true_pscores_item_position,description\n@@ -2282,3 +2286,19 @@ def test_obtain_pscore_given_evaluation_policy_logit_using_mock_input_data(\nassert np.allclose(\ntrue_pscores_item_position, evaluation_policy_pscore_item_position\n)\n+\n+ (\n+ evaluation_policy_pscore,\n+ evaluation_policy_pscore_item_position,\n+ evaluation_policy_pscore_cascade,\n+ ) = dataset.obtain_pscore_given_evaluation_policy_logit(\n+ action,\n+ evaluation_policy_logit_,\n+ return_pscore_item_position=True,\n+ clip_logit_value=100.0,\n+ )\n+ assert np.allclose(true_pscores, evaluation_policy_pscore)\n+ assert np.allclose(true_pscores_cascade, evaluation_policy_pscore_cascade)\n+ assert np.allclose(\n+ true_pscores_item_position, evaluation_policy_pscore_item_position\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add clip_logit_value to obtain_pscore_given_evaluation_policy_logit |
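The `clip_logit_value` option above caps logits at 700 before exponentiation; `np.exp` overflows float64 a little above 709, so the cap keeps the softmax finite and only distorts probabilities when logits actually exceed the threshold. A tiny sketch of the failure mode and the clipped computation:

```python
import numpy as np

logit = np.array([1.0, 720.0, 3.0])
with np.errstate(over="ignore"):
    print(np.exp(logit))              # the 720.0 entry overflows to inf

clipped = np.exp(np.minimum(logit, 700.0))
print(clipped / clipped.sum())        # finite, well-defined probabilities
```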
641,014 | 06.07.2021 13:29:57 | -32,400 | b1c817b0e1ff9c05104a1038cc5a362eaef4569f | implement SyntheticContinuousBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -6,6 +6,13 @@ from obp.dataset.synthetic import logistic_reward_function\nfrom obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import linear_behavior_policy\nfrom obp.dataset.multiclass import MultiClassToBanditReduction\n+from obp.dataset.synthetic_continuous import SyntheticContinuousBanditDataset\n+from obp.dataset.synthetic_continuous import linear_reward_funcion_continuous\n+from obp.dataset.synthetic_continuous import quadratic_reward_funcion_continuous\n+from obp.dataset.synthetic_continuous import linear_behavior_policy_continuous\n+from obp.dataset.synthetic_continuous import linear_synthetic_policy_continuous\n+from obp.dataset.synthetic_continuous import threshold_synthetic_policy_continuous\n+from obp.dataset.synthetic_continuous import sin_synthetic_policy_continuous\nfrom obp.dataset.synthetic_slate import SyntheticSlateBanditDataset\nfrom obp.dataset.synthetic_slate import action_interaction_reward_function\nfrom obp.dataset.synthetic_slate import linear_behavior_policy_logit\n@@ -19,6 +26,13 @@ __all__ = [\n\"linear_reward_function\",\n\"linear_behavior_policy\",\n\"MultiClassToBanditReduction\",\n+ \"SyntheticContinuousBanditDataset\",\n+ \"linear_reward_funcion_continuous\",\n+ \"quadratic_reward_funcion_continuous\",\n+ \"linear_behavior_policy_continuous\",\n+ \"linear_synthetic_policy_continuous\",\n+ \"threshold_synthetic_policy_continuous\",\n+ \"sin_synthetic_policy_continuous\",\n\"SyntheticSlateBanditDataset\",\n\"action_interaction_reward_function\",\n\"linear_behavior_policy_logit\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/dataset/synthetic_continuous.py",
"diff": "+# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Class for Generating Synthetic Continuous Logged Bandit Feedback.\"\"\"\n+from dataclasses import dataclass\n+from typing import Optional, Callable\n+\n+import numpy as np\n+from scipy.stats import uniform, truncnorm\n+from sklearn.utils import check_random_state, check_scalar\n+\n+from .base import BaseBanditDataset\n+from ..types import BanditFeedback\n+\n+\n+@dataclass\n+class SyntheticContinuousBanditDataset(BaseBanditDataset):\n+ \"\"\"Class for generating synthetic continuous bandit dataset.\n+\n+ Note\n+ -----\n+ By calling the `obtain_batch_bandit_feedback` method several times, we have different bandit samples with the same setting.\n+ This can be used to estimate confidence intervals of the performances of OPE estimators for continuous actions.\n+ If None is set as `behavior_policy_function`, the synthetic data will be context-free bandit feedback.\n+\n+ Parameters\n+ -----------\n+ dim_context: int, default=1\n+ Number of dimensions of context vectors.\n+\n+ reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\n+ Function generating expected reward for each given action-context pair,\n+ i.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n+ If None is set, context **independent** expected reward for each action will be\n+ sampled from the uniform distribution automatically.\n+\n+ behavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\n+ Function generating the propensity score of continuous actions,\n+ i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\n+ If None is set, context **independent** uniform distribution will be used (uniform behavior policy).\n+\n+ random_state: int, default=12345\n+ Controls the random seed in sampling synthetic slate bandit dataset.\n+\n+ dataset_name: str, default='synthetic_slate_bandit_dataset'\n+ Name of the dataset.\n+\n+ Examples\n+ ----------\n+\n+ .. 
code-block:: python\n+\n+ >>> from obp.dataset import (\n+ SyntheticContinuousBanditDataset,\n+ linear_reward_funcion_continuous,\n+ linear_behavior_policy_continuous,\n+ )\n+\n+ >>> dataset = SyntheticContinuousBanditDataset(\n+ dim_context=5,\n+ reward_function=linear_reward_funcion_continuous,\n+ behavior_policy_function=linear_behavior_policy_continuous,\n+ random_state=12345,\n+ )\n+ >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=10000, min_action_value=1, max_action_value=10,\n+ )\n+\n+ >>> bandit_feedback\n+\n+ {\n+ 'n_rounds': 10000,\n+ 'context': array([[-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057],\n+ [ 1.39340583, 0.09290788, 0.28174615, 0.76902257, 1.24643474],\n+ [ 1.00718936, -1.29622111, 0.27499163, 0.22891288, 1.35291684],\n+ ...,\n+ [-1.27028221, 0.80914602, -0.45084222, 0.47179511, 1.89401115],\n+ [-0.68890924, 0.08857502, -0.56359347, -0.41135069, 0.65157486],\n+ [ 0.51204121, 0.65384817, -1.98849253, -2.14429131, -0.34186901]]),\n+ 'action': array([7.15163752, 2.22523458, 1.80661079, ..., 3.23401871, 2.36257676,\n+ 3.46584587]),\n+ 'reward': array([2.23806215, 3.04770578, 1.64975454, ..., 1.75709223, 1.07265021,\n+ 2.4478468 ]),\n+ 'pscore': array([0.13484565, 0.39339631, 0.32859093, ..., 0.04650679, 0.34450074,\n+ 0.31665289]),\n+ 'position': None,\n+ 'expected_reward': array([3.01472331, 1.25381652, 0.9098273 , ..., 1.75787986, 1.04337996,\n+ 2.32619881])\n+ }\n+\n+ \"\"\"\n+\n+ dim_context: int = 1\n+ reward_function: Optional[\n+ Callable[\n+ [np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n+ ]\n+ ] = None\n+ behavior_policy_function: Optional[\n+ Callable[[np.ndarray, np.ndarray], np.ndarray]\n+ ] = None\n+ random_state: int = 12345\n+ dataset_name: str = \"synthetic_continuous_bandit_dataset\"\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize Class.\"\"\"\n+ check_scalar(self.dim_context, name=\"dim_context\", target_type=int, min_val=1)\n+ if self.random_state is None:\n+ raise ValueError(\"random_state must be given\")\n+ self.random_ = check_random_state(self.random_state)\n+\n+ def _contextfree_reward_function(self, action: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate context-free expected rewards given only continuous action values.\"\"\"\n+ return 2 * np.power(action, 1.5) - (5 * action)\n+\n+ def obtain_batch_bandit_feedback(\n+ self,\n+ n_rounds: int,\n+ action_noise: float = 1.0,\n+ reward_noise: float = 1.0,\n+ min_action_value: float = -np.inf,\n+ max_action_value: float = np.inf,\n+ ) -> BanditFeedback:\n+ \"\"\"Obtain batch logged bandit feedback.\n+\n+ Parameters\n+ ----------\n+ n_rounds: int\n+ Number of rounds for synthetic bandit feedback data.\n+\n+ action_noise: float, default=1.0\n+ Standard deviation of the Gaussian noise on the continuous action value.\n+\n+ reward_noise: float, default=1.0\n+ Standard deviation of the Gaussian noise on the reward.\n+\n+ min_action_value: float, default=-np.inf\n+ A minimum possible continuous action value.\n+\n+ max_action_value: float, default=np.inf\n+ A maximum possible continuous action value.\n+\n+ Returns\n+ ---------\n+ bandit_feedback: BanditFeedback\n+ Generated synthetic bandit feedback dataset with continuous actions.\n+\n+ \"\"\"\n+ check_scalar(n_rounds, name=\"n_rounds\", target_type=int, min_val=1)\n+ check_scalar(\n+ action_noise, name=\"action_noise\", target_type=(int, float), min_val=0\n+ )\n+ check_scalar(\n+ reward_noise, name=\"reward_noise\", target_type=(int, float), min_val=0\n+ )\n+ check_scalar(\n+ 
min_action_value, name=\"min_action_value\", target_type=(int, float)\n+ )\n+ check_scalar(\n+ max_action_value, name=\"max_action_value\", target_type=(int, float)\n+ )\n+ if max_action_value <= min_action_value:\n+ raise ValueError(\n+ \"`max_action_value` must be larger than `min_action_value`\"\n+ )\n+\n+ context = self.random_.normal(size=(n_rounds, self.dim_context))\n+ # sample actions for each round based on the behavior policy\n+ if self.behavior_policy_function is not None:\n+ expected_action_values = self.behavior_policy_function(\n+ context=context,\n+ random_state=self.random_state,\n+ )\n+ a = (min_action_value - expected_action_values) / action_noise\n+ b = (max_action_value - expected_action_values) / action_noise\n+ action = truncnorm.rvs(\n+ a,\n+ b,\n+ loc=expected_action_values,\n+ scale=action_noise,\n+ random_state=self.random_state,\n+ )\n+ pscore = truncnorm.pdf(\n+ action, a, b, loc=expected_action_values, scale=action_noise\n+ )\n+ else:\n+ action = uniform.rvs(\n+ loc=min_action_value,\n+ scale=(max_action_value - min_action_value),\n+ size=n_rounds,\n+ random_state=self.random_state,\n+ )\n+ pscore = uniform.pdf(\n+ action,\n+ loc=min_action_value,\n+ scale=(max_action_value - min_action_value),\n+ )\n+\n+ if self.reward_function is None:\n+ expected_reward_ = self._contextfree_reward_function(action=action)\n+ else:\n+ expected_reward_ = self.reward_function(\n+ context=context, action=action, random_state=self.random_state\n+ )\n+ reward = expected_reward_ + self.random_.normal(\n+ scale=reward_noise, size=n_rounds\n+ )\n+\n+ return dict(\n+ n_rounds=n_rounds,\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=None, # position is irrelevant for continuous action data\n+ expected_reward=expected_reward_,\n+ )\n+\n+ def calc_ground_truth_policy_value(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ ) -> float:\n+ \"\"\"Calculate the policy value of the action sequence.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds_of_test_data, dim_context)\n+ Context vectors of test data.\n+\n+ action: array-like, shape (n_rounds_of_test_data,)\n+ Continuous action values for test data predicted by the (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ Returns\n+ ----------\n+ policy_value: float\n+ The policy value of the evaluation policy on the given test bandit feedback data.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+ if context.shape[1] != self.dim_context:\n+ raise ValueError(\n+ \"the size of axis 1 of context must be the same as dim_context\"\n+ )\n+ if not isinstance(action, np.ndarray) or action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional ndarray\")\n+ if context.shape[0] != action.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of context must be the same as that of action\"\n+ )\n+\n+ if self.reward_function is None:\n+ return self._contextfree_reward_function(action=action).mean()\n+ else:\n+ return self.reward_function(\n+ context=context, action=action, random_state=self.random_state\n+ ).mean()\n+\n+\n+# some functions to generate synthetic bandit feedback with continuous actions\n+\n+\n+def linear_reward_funcion_continuous(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Linear reward function to generate synthetic continuous bandit datasets.\n+\n+ Parameters\n+ -----------\n+ 
context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action: array-like, shape (n_rounds,)\n+ Continuous action values.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling parameters.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds,)\n+ Expected reward given context (:math:`x`) and continuous action (:math:`a`).\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+ if not isinstance(action, np.ndarray) or action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional ndarray\")\n+ if context.shape[0] != action.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of context must be the same as that of action\"\n+ )\n+\n+ random_ = check_random_state(random_state)\n+ coef_ = random_.normal(size=context.shape[1])\n+ pow_, bias = random_.uniform(size=2)\n+ return (np.abs(context @ coef_ - action) ** pow_) + bias\n+\n+\n+def quadratic_reward_funcion_continuous(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Quadratic reward function to generate synthetic continuous bandit datasets.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ action: array-like, shape (n_rounds,)\n+ Continuous action values.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling parameters.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds,)\n+ Expected reward given context (:math:`x`) and continuous action (:math:`a`).\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+ if not isinstance(action, np.ndarray) or action.ndim != 1:\n+ raise ValueError(\"action must be 1-dimensional ndarray\")\n+ if context.shape[0] != action.shape[0]:\n+ raise ValueError(\n+ \"the size of axis 0 of context must be the same as that of action\"\n+ )\n+\n+ random_ = check_random_state(random_state)\n+ coef_x = random_.normal(size=context.shape[1])\n+ coef_x_a = random_.normal(size=context.shape[1])\n+ coef_x_a_squared = random_.normal(size=context.shape[1])\n+ coef_a = random_.normal(size=1)\n+\n+ expected_reward = (coef_a * action) * (context @ coef_x)\n+ expected_reward += (context @ coef_x_a) * action\n+ expected_reward += (action - context @ coef_x_a_squared) ** 2\n+ return expected_reward\n+\n+\n+def linear_behavior_policy_continuous(\n+ context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Linear behavior policy function to generate synthetic continuous bandit datasets.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling parameters.\n+\n+ Returns\n+ ---------\n+ expected_action_value: array-like, shape (n_rounds,)\n+ Expected continuous action values given context (:math:`x`).\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ random_ = check_random_state(random_state)\n+ coef_ = random_.normal(size=context.shape[1])\n+ bias = random_.uniform(size=1)\n+ return context @ coef_ + bias\n+\n+\n+# 
some functions to generate synthetic (evaluation) policies for continuous actions\n+\n+\n+def linear_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:\n+ \"\"\"Linear synthtic policy for continuous actions.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ Returns\n+ ---------\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by a synthetic (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ return context.mean(1)\n+\n+\n+def threshold_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:\n+ \"\"\"Threshold synthtic policy for continuous actions.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ Returns\n+ ---------\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by a synthetic (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ return 1.0 + np.sign(context.mean(1) - 1.5)\n+\n+\n+def sin_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:\n+ \"\"\"Sign synthtic policy for continuous actions.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each round (such as user information).\n+\n+ Returns\n+ ---------\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by a synthetic (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+\n+ return np.sin(context.mean(1))\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement SyntheticContinuousBanditDataset |
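In the dataset class above, when a behavior policy function is given, each continuous action is drawn from a truncated normal centred on a context-dependent value, and the logged `pscore` is the density (generalized propensity score) of that distribution at the sampled action. A minimal sketch of that sampling step with toy numbers (plain SciPy, mirroring the `truncnorm` calls in the diff):

```python
import numpy as np
from scipy.stats import truncnorm

expected_action = np.array([0.5, 2.0, 4.0])   # e.g. context @ coef + bias
noise, lo, hi = 1.0, 1.0, 10.0
a = (lo - expected_action) / noise
b = (hi - expected_action) / noise

action = truncnorm.rvs(a, b, loc=expected_action, scale=noise, random_state=12345)
pscore = truncnorm.pdf(action, a, b, loc=expected_action, scale=noise)
print(action, pscore)                          # pscore values are densities, not probabilities
```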
641,014 | 06.07.2021 13:30:24 | -32,400 | c77dd99fbc7e66d33473d2cbd5ecc2e93a4abc87 | add tests of synthetic_continuous.py | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -304,12 +304,12 @@ def test_synthetic_linear_behavior_policy():\naction_context = [1.0, 1.0]\nlinear_behavior_policy(context=np.ones([2, 2]), action_context=action_context)\n- # expected_reward\n+ # pscore (action choice probabilities by behavior policy)\nn_rounds = 10\ndim_context = dim_action_context = 3\nn_actions = 5\ncontext = np.ones([n_rounds, dim_context])\naction_context = np.ones([n_actions, dim_action_context])\n- action_prob = linear_behavior_policy(context=context, action_context=action_context)\n- assert action_prob.shape[0] == n_rounds and action_prob.shape[1] == n_actions\n- assert np.all(0 <= action_prob) and np.all(action_prob <= 1)\n+ pscore = linear_behavior_policy(context=context, action_context=action_context)\n+ assert pscore.shape[0] == n_rounds and pscore.shape[1] == n_actions\n+ assert np.all(0 <= pscore) and np.all(pscore <= 1)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/dataset/test_synthetic_continuous.py",
"diff": "+import pytest\n+import numpy as np\n+\n+from obp.dataset import SyntheticContinuousBanditDataset\n+from obp.dataset.synthetic_continuous import (\n+ linear_reward_funcion_continuous,\n+ quadratic_reward_funcion_continuous,\n+ linear_behavior_policy_continuous,\n+ linear_synthetic_policy_continuous,\n+ threshold_synthetic_policy_continuous,\n+ sin_synthetic_policy_continuous,\n+)\n+\n+\n+def test_synthetic_continuous_init():\n+ # dim_context\n+ with pytest.raises(ValueError):\n+ SyntheticContinuousBanditDataset(dim_context=0)\n+\n+ with pytest.raises(TypeError):\n+ SyntheticContinuousBanditDataset(dim_context=\"2\")\n+\n+ with pytest.raises(TypeError):\n+ SyntheticContinuousBanditDataset(dim_context=None)\n+\n+ # random_state\n+ with pytest.raises(ValueError):\n+ SyntheticContinuousBanditDataset(random_state=None)\n+\n+ with pytest.raises(ValueError):\n+ SyntheticContinuousBanditDataset(random_state=\"3\")\n+\n+\n+# n_rounds, action_noise, reward_noise, min_action_value, max_action_value, err, description\n+invalid_input_of_obtain_batch_bandit_feedback = [\n+ (\n+ 0, #\n+ 1.0,\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ ValueError,\n+ \"`n_rounds`= 0, must be >= 1.\",\n+ ),\n+ (\n+ 1.0, #\n+ 1.0,\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`n_rounds` must be an instance of <class 'int'>, not <class 'float'>.\",\n+ ),\n+ (\n+ \"3\", #\n+ 1.0,\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`n_rounds` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ ),\n+ (\n+ None, #\n+ 1.0,\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`n_rounds` must be an instance of <class 'int'>, not <class 'NoneType'>.\",\n+ ),\n+ (\n+ 3,\n+ -1.0, #\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ ValueError,\n+ \"`action_noise`= -1.0, must be >= 0.\",\n+ ),\n+ (\n+ 3,\n+ \"3\", #\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`action_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ 3,\n+ None, #\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`action_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ -1.0, #\n+ -1.0,\n+ 1.0,\n+ ValueError,\n+ \"`reward_noise`= -1.0, must be >= 0.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ \"3\", #\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`reward_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ None, #\n+ -1.0,\n+ 1.0,\n+ TypeError,\n+ \"`reward_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ \"3\", #\n+ 1.0,\n+ TypeError,\n+ \"`min_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ None, #\n+ 1.0,\n+ TypeError,\n+ \"`min_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ 1.0,\n+ \"3\", #\n+ TypeError,\n+ \"`max_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ 1.0,\n+ None, #\n+ TypeError,\n+ \"`max_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ 1.0, #\n+ -1.0, #\n+ ValueError,\n+ \"`max_action_value` must be larger than `min_action_value`\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_rounds, action_noise, reward_noise, min_action_value, max_action_value, err, description\",\n+ 
invalid_input_of_obtain_batch_bandit_feedback,\n+)\n+def test_synthetic_continuous_obtain_batch_bandit_feedback_using_invalid_inputs(\n+ n_rounds,\n+ action_noise,\n+ reward_noise,\n+ min_action_value,\n+ max_action_value,\n+ err,\n+ description,\n+):\n+ dataset = SyntheticContinuousBanditDataset()\n+\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ action_noise=action_noise,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+\n+\n+def test_synthetic_continuous_obtain_batch_bandit_feedback():\n+ # bandit feedback\n+ n_rounds = 10\n+ min_action_value = -1.0\n+ max_action_value = 1.0\n+ dataset = SyntheticContinuousBanditDataset()\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+ assert bandit_feedback[\"n_rounds\"] == n_rounds\n+ assert (\n+ bandit_feedback[\"context\"].shape[0] == n_rounds # n_rounds\n+ and bandit_feedback[\"context\"].shape[1] == 1 # default dim_context\n+ )\n+ assert (\n+ bandit_feedback[\"action\"].ndim == 1\n+ and len(bandit_feedback[\"action\"]) == n_rounds\n+ )\n+ assert np.all(min_action_value <= bandit_feedback[\"action\"]) and np.all(\n+ bandit_feedback[\"action\"] <= max_action_value\n+ )\n+ assert bandit_feedback[\"position\"] is None\n+ assert (\n+ bandit_feedback[\"reward\"].ndim == 1\n+ and len(bandit_feedback[\"reward\"]) == n_rounds\n+ )\n+ assert (\n+ bandit_feedback[\"expected_reward\"].ndim == 1\n+ and len(bandit_feedback[\"expected_reward\"]) == n_rounds\n+ )\n+ assert (\n+ bandit_feedback[\"pscore\"].ndim == 1\n+ and len(bandit_feedback[\"pscore\"]) == n_rounds\n+ )\n+\n+\n+# context, action, description\n+invalid_input_of_calc_policy_value = [\n+ (\n+ np.ones((3, 1)),\n+ np.ones(4),\n+ \"the size of axis 0 of context must be the same as that of action\",\n+ ),\n+ (\n+ np.ones((4, 2)), #\n+ np.ones(4),\n+ \"the size of axis 1 of context must be the same as dim_context\",\n+ ),\n+ (\"3\", np.ones(4), \"context must be 2-dimensional ndarray\"),\n+ (None, np.ones(4), \"context must be 2-dimensional ndarray\"),\n+ (\n+ np.ones((4, 1)),\n+ np.ones((4, 1)), #\n+ \"action must be 1-dimensional ndarray\",\n+ ),\n+ (np.ones((4, 1)), \"3\", \"action must be 1-dimensional ndarray\"),\n+ (np.ones((4, 1)), None, \"action must be 1-dimensional ndarray\"),\n+]\n+\n+\[email protected](\n+ \"context, action, description\",\n+ invalid_input_of_calc_policy_value,\n+)\n+def test_synthetic_continuous_calc_policy_value_using_invalid_inputs(\n+ context,\n+ action,\n+ description,\n+):\n+ dataset = SyntheticContinuousBanditDataset()\n+\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dataset.calc_ground_truth_policy_value(\n+ context=context,\n+ action=action,\n+ )\n+\n+\n+def test_synthetic_continuous_calc_policy_value():\n+ n_rounds = 10\n+ dim_context = 3\n+ dataset = SyntheticContinuousBanditDataset(dim_context=dim_context)\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ min_action_value=1,\n+ max_action_value=10,\n+ )\n+\n+ policy_value = dataset.calc_ground_truth_policy_value(\n+ context=bandit_feedback[\"context\"],\n+ action=bandit_feedback[\"action\"],\n+ )\n+ assert isinstance(\n+ policy_value, float\n+ ), \"Invalid response of calc_ground_truth_policy_value\"\n+ assert policy_value == bandit_feedback[\"expected_reward\"].mean()\n+\n+\n+def 
test_synthetic_linear_reward_funcion_continuous():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ linear_reward_funcion_continuous(context=context, action=np.ones(2))\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ linear_reward_funcion_continuous(context=context, action=np.ones([2, 2]))\n+\n+ # action\n+ with pytest.raises(ValueError):\n+ action = np.array([1.0])\n+ linear_reward_funcion_continuous(context=np.ones([2, 2]), action=action)\n+\n+ with pytest.raises(ValueError):\n+ action = [1.0, 1.0]\n+ linear_reward_funcion_continuous(context=np.ones([2, 2]), action=action)\n+\n+ with pytest.raises(ValueError):\n+ linear_reward_funcion_continuous(context=np.ones([2, 2]), action=np.ones(3))\n+\n+ # expected_reward\n+ n_rounds = 10\n+ dim_context = 3\n+ context = np.ones([n_rounds, dim_context])\n+ action = np.ones(n_rounds)\n+ expected_reward = linear_reward_funcion_continuous(context=context, action=action)\n+ assert expected_reward.shape[0] == n_rounds and expected_reward.ndim == 1\n+\n+\n+def test_synthetic_quadratic_reward_funcion_continuous():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ quadratic_reward_funcion_continuous(context=context, action=np.ones(2))\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ quadratic_reward_funcion_continuous(context=context, action=np.ones([2, 2]))\n+\n+ # action\n+ with pytest.raises(ValueError):\n+ action = np.array([1.0])\n+ quadratic_reward_funcion_continuous(context=np.ones([2, 2]), action=action)\n+\n+ with pytest.raises(ValueError):\n+ action = [1.0, 1.0]\n+ quadratic_reward_funcion_continuous(context=np.ones([2, 2]), action=action)\n+\n+ with pytest.raises(ValueError):\n+ quadratic_reward_funcion_continuous(context=np.ones([2, 2]), action=np.ones(3))\n+\n+ # expected_reward\n+ n_rounds = 10\n+ dim_context = 3\n+ context = np.ones([n_rounds, dim_context])\n+ action = np.ones(n_rounds)\n+ expected_reward = quadratic_reward_funcion_continuous(\n+ context=context, action=action\n+ )\n+ assert expected_reward.shape[0] == n_rounds and expected_reward.ndim == 1\n+\n+\n+def test_synthetic_linear_behavior_policy_continuous():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ linear_behavior_policy_continuous(context=context)\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ linear_behavior_policy_continuous(context=context)\n+\n+ # expected continuous action values by behavior policy\n+ n_rounds = 10\n+ dim_context = 3\n+ context = np.ones([n_rounds, dim_context])\n+ expected_continuous_actions = linear_behavior_policy_continuous(context=context)\n+ assert (\n+ expected_continuous_actions.shape[0] == n_rounds\n+ and expected_continuous_actions.ndim == 1\n+ )\n+\n+\n+def test_linear_synthetic_policy_continuous():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ linear_behavior_policy_continuous(context=context)\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ linear_behavior_policy_continuous(context=context)\n+\n+ # continuous action values given by a synthetic policy\n+ n_rounds = 10\n+ dim_context = 3\n+ context = np.ones([n_rounds, dim_context])\n+ continuous_actions = linear_synthetic_policy_continuous(context=context)\n+ assert continuous_actions.shape[0] == n_rounds and continuous_actions.ndim == 1\n+\n+\n+def test_threshold_synthetic_policy_continuous():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ 
threshold_synthetic_policy_continuous(context=context)\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ threshold_synthetic_policy_continuous(context=context)\n+\n+ # continuous action values given by a synthetic policy\n+ n_rounds = 10\n+ dim_context = 3\n+ context = np.ones([n_rounds, dim_context])\n+ continuous_actions = threshold_synthetic_policy_continuous(context=context)\n+ assert continuous_actions.shape[0] == n_rounds and continuous_actions.ndim == 1\n+\n+\n+def test_sin_synthetic_policy_continuous():\n+ # context\n+ with pytest.raises(ValueError):\n+ context = np.array([1.0, 1.0])\n+ sin_synthetic_policy_continuous(context=context)\n+\n+ with pytest.raises(ValueError):\n+ context = [1.0, 1.0]\n+ sin_synthetic_policy_continuous(context=context)\n+\n+ # continuous action values given by a synthetic policy\n+ n_rounds = 10\n+ dim_context = 3\n+ context = np.ones([n_rounds, dim_context])\n+ continuous_actions = sin_synthetic_policy_continuous(context=context)\n+ assert continuous_actions.shape[0] == n_rounds and continuous_actions.ndim == 1\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests of synthetic_continuous.py |
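The `calc_ground_truth_policy_value` test above relies on the fact that, for a deterministic evaluation policy, the ground-truth policy value is the average of the noise-free expected reward evaluated at the actions the policy would take. A toy sketch of that computation (hypothetical reward function; the library versions draw their coefficients from a random seed):

```python
import numpy as np

def toy_expected_reward(context, action):
    # hypothetical stand-in for the reward functions tested above
    return np.abs(context.mean(axis=1) - action)

context = np.random.default_rng(0).normal(size=(1000, 3))
action_by_policy = context.mean(axis=1)        # a deterministic (linear) policy
ground_truth_value = toy_expected_reward(context, action_by_policy).mean()
print(ground_truth_value)                      # 0.0 here: the toy policy is optimal for the toy reward
```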
641,014 | 06.07.2021 23:08:30 | -32,400 | c0874d4284a71852a5eca985a78957cb8f491bd9 | implement continuous ope estimators | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -10,8 +10,18 @@ from obp.ope.estimators import DoublyRobustWithShrinkage\nfrom obp.ope.estimators_slate import SlateStandardIPS\nfrom obp.ope.estimators_slate import SlateIndependentIPS\nfrom obp.ope.estimators_slate import SlateRewardInteractionIPS\n+from obp.ope.estimators_continuous import KernelizedInverseProbabilityWeighting\n+from obp.ope.estimators_continuous import (\n+ KernelizedSelfNormalizedInverseProbabilityWeighting,\n+)\n+from obp.ope.estimators_continuous import triangular_kernel\n+from obp.ope.estimators_continuous import gaussian_kernel\n+from obp.ope.estimators_continuous import epanechnikov_kernel\n+from obp.ope.estimators_continuous import cosine_kernel\n+from obp.ope.estimators_continuous import KernelizedDoublyRobust\nfrom obp.ope.meta import OffPolicyEvaluation\nfrom obp.ope.meta_slate import SlateOffPolicyEvaluation\n+from obp.ope.meta_continuous import ContinuousOffPolicyEvaluation\nfrom obp.ope.regression_model import RegressionModel\n__all__ = [\n@@ -26,10 +36,18 @@ __all__ = [\n\"DoublyRobustWithShrinkage\",\n\"OffPolicyEvaluation\",\n\"SlateOffPolicyEvaluation\",\n+ \"ContinuousOffPolicyEvaluation\",\n\"RegressionModel\",\n\"SlateStandardIPS\",\n\"SlateIndependentIPS\",\n\"SlateRewardInteractionIPS\",\n+ \"KernelizedInverseProbabilityWeighting\",\n+ \"KernelizedSelfNormalizedInverseProbabilityWeighting\",\n+ \"KernelizedDoublyRobust\",\n+ \"triangular_kernel\",\n+ \"gaussian_kernel\",\n+ \"epanechnikov_kernel\",\n+ \"cosine_kernel\",\n]\n__all_estimators__ = [\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/ope/estimators_continuous.py",
"diff": "+# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Off-Policy Estimators for Continuous Actions.\"\"\"\n+from abc import ABCMeta, abstractmethod\n+from dataclasses import dataclass\n+from typing import Dict, Optional, Union\n+\n+import numpy as np\n+from sklearn.utils import check_scalar\n+\n+from ..utils import (\n+ estimate_confidence_interval_by_bootstrap,\n+ check_continuous_ope_inputs,\n+)\n+\n+# kernel functions\n+# reference: https://en.wikipedia.org/wiki/Kernel_(statistics)\n+def triangular_kernel(u: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate triangular kernel function.\"\"\"\n+ clipped_u = np.clip(u, -1.0, 1.0)\n+ return 1 - np.abs(clipped_u)\n+\n+\n+def gaussian_kernel(u: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate gaussian kernel function.\"\"\"\n+ return np.exp(-(u ** 2) / 2) / np.sqrt(2 * np.pi)\n+\n+\n+def epanechnikov_kernel(u: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate epanechnikov kernel function.\"\"\"\n+ clipped_u = np.clip(u, -1.0, 1.0)\n+ return 0.75 * (1 - clipped_u ** 2)\n+\n+\n+def cosine_kernel(u: np.ndarray) -> np.ndarray:\n+ \"\"\"Calculate cosine kernel function.\"\"\"\n+ clipped_u = np.clip(u, -1.0, 1.0)\n+ return (np.pi / 4) * np.cos(clipped_u * np.pi / 2)\n+\n+\n+kernel_functions = dict(\n+ gaussian=gaussian_kernel,\n+ epanechnikov=epanechnikov_kernel,\n+ triangular=triangular_kernel,\n+ cosine=cosine_kernel,\n+)\n+\n+\n+@dataclass\n+class BaseOffPolicyEstimatorForContinuousAction(metaclass=ABCMeta):\n+ \"\"\"Base class for OPE estimators for continuous actions.\"\"\"\n+\n+ @abstractmethod\n+ def _estimate_round_rewards(self) -> np.ndarray:\n+ \"\"\"Estimate rewards for each round.\"\"\"\n+ raise NotImplementedError\n+\n+ @abstractmethod\n+ def estimate_policy_value(self) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\"\"\"\n+ raise NotImplementedError\n+\n+ @abstractmethod\n+ def estimate_interval(self) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\"\"\"\n+ raise NotImplementedError\n+\n+\n+@dataclass\n+class KernelizedInverseProbabilityWeighting(BaseOffPolicyEstimatorForContinuousAction):\n+ \"\"\"Kernelized Inverse Probability Weighting.\n+\n+ Note\n+ -------\n+ Kernelized Inverse Probability Weighting (KernelizedIPW)\n+ estimates the policy value of a given (deterministic) evaluation policy :math:`\\\\pi_e` by\n+\n+ .. 
math::\n+\n+ \\\\hat{V}_{\\\\mathrm{Kernel-IPW}} (\\\\pi_e; \\\\mathcal{D})\n+ := \\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[ \\\\frac{1}{h} K \\\\left( \\\\frac{\\pi_e(x_t) - a_t}{h} \\\\right) \\\\frac{r_t}{q_t} \\\\right],\n+\n+ where :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by a behavior policy.\n+ Note that each action :math:`a_t` in the logged bandit data is a continuous variable.\n+ :math:`q_t` is a generalized propensity score that is defined as the conditional probability density of the behavior policy.\n+ :math:`K(\\cdot)` is a kernel function such as the gaussian kernel, and :math:`h` is a bandwidth hyperparameter.\n+ :math:`\\\\pi_e (x)` is a deterministic evaluation policy that maps :math:`x` to a continuous action value.\n+ :math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n+\n+ Parameters\n+ ------------\n+\n+ kernel: str\n+ Choice of kernel function.\n+ Must be one of \"gaussian\", \"epanechnikov\", \"triangular\", or \"cosine\".\n+\n+ bandwidth: float\n+ A bandwidth hyperparameter.\n+ A larger value increases bias instead of reducing variance.\n+ A smaller value increases variance instead of reducing bias.\n+\n+ estimator_name: str, default='kernelized_ipw'.\n+ Name of off-policy estimator.\n+\n+ References\n+ ------------\n+ Nathan Kallus and Angela Zhou.\n+ \"Policy Evaluation and Optimization with Continuous Treatments\", 2018.\n+\n+ \"\"\"\n+\n+ kernel: str\n+ bandwidth: float\n+ estimator_name: str = \"kernelized_ipw\"\n+\n+ def __post_init__(self) -> None:\n+ if self.kernel not in [\"gaussian\", \"epanechnikov\", \"triangular\", \"cosine\"]:\n+ raise ValueError(\n+ f\"kernel must be one of 'gaussian', 'epanechnikov', 'triangular', and 'cosine' but {self.kernel} is given\"\n+ )\n+ check_scalar(\n+ self.bandwidth, name=\"bandwidth\", target_type=(int, float), min_val=0\n+ )\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate rewards for each round.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (n_rounds,)\n+ Rewards estimated by KernelizedIPW for each round.\n+\n+ \"\"\"\n+ kernel_func = kernel_functions[self.kernel]\n+ u = action_by_evaluation_policy - action_by_behavior_policy\n+ u /= self.bandwidth\n+ estimated_rewards = kernel_func(u) * reward / pscore\n+ estimated_rewards /= self.bandwidth\n+ return estimated_rewards\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ **kwargs,\n+ ) -> 
np.ndarray:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ Returns\n+ ----------\n+ V_hat: float\n+ Estimated policy value (performance) of a given evaluation policy.\n+\n+ \"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action_by_behavior_policy, np.ndarray):\n+ raise ValueError(\"action_by_behavior_policy must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_continuous_ope_inputs(\n+ reward=reward,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ )\n+\n+ return self._estimate_round_rewards(\n+ reward=reward,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ ).mean()\n+\n+ def estimate_interval(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ alpha: float, default=0.05\n+ Significant level of confidence intervals.\n+\n+ n_bootstrap_samples: int, default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action_by_behavior_policy, np.ndarray):\n+ raise ValueError(\"action_by_behavior_policy must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_continuous_ope_inputs(\n+ reward=reward,\n+ 
action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ )\n+\n+ estimated_round_rewards = self._estimate_round_rewards(\n+ reward=reward,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ )\n+\n+ return estimate_confidence_interval_by_bootstrap(\n+ samples=estimated_round_rewards,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\n+@dataclass\n+class KernelizedSelfNormalizedInverseProbabilityWeighting(\n+ KernelizedInverseProbabilityWeighting\n+):\n+ \"\"\"Kernelized Self-Normalized Inverse Probability Weighting.\n+\n+ Note\n+ -------\n+ Kernelized Self-Normalized Inverse Probability Weighting (KernelizedSNIPW)\n+ estimates the policy value of a given (deterministic) evaluation policy :math:`\\\\pi_e` by\n+\n+ .. math::\n+\n+ \\\\hat{V}_{\\\\mathrm{Kernel-SNIPW}} (\\\\pi_e; \\\\mathcal{D})\n+ := \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[ K \\\\left( \\\\frac{\\pi_e(x_t) - a_t}{h} \\\\right) \\\\frac{r_t}{q_t} \\\\right]}{\\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[ K \\\\left( \\\\frac{\\pi_e(x_t) - a_t}{h} \\\\right) \\\\frac{r_t}{q_t}},\n+\n+ where :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by a behavior policy.\n+ Note that each action :math:`a_t` in the logged bandit data is a continuous variable.\n+ :math:`q_t` is a generalized propensity score that is defined as the conditional probability density of the behavior policy.\n+ :math:`K(\\cdot)` is a kernel function such as the gaussian kernel, and :math:`h` is a bandwidth hyperparameter.\n+ :math:`\\\\pi_e (x)` is a deterministic evaluation policy that maps :math:`x` to a continuous action value.\n+ :math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n+\n+ Parameters\n+ ------------\n+ kernel: str\n+ Choice of kernel function.\n+ Must be one of \"gaussian\", \"epanechnikov\", and \"triangular\".\n+\n+ bandwidth: float\n+ A bandwidth hyperparameter.\n+ A larger value increases bias instead of reducing variance.\n+ A smaller value increases variance instead of reducing bias.\n+\n+ estimator_name: str, default='kernelized_snipw'.\n+ Name of off-policy estimator.\n+\n+ References\n+ ------------\n+ Nathan Kallus and Angela Zhou.\n+ \"Policy Evaluation and Optimization with Continuous Treatments\", 2018.\n+\n+ \"\"\"\n+\n+ kernel: str\n+ bandwidth: float\n+ estimator_name: str = \"kernelized_snipw\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate rewards for each round.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by 
the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (n_rounds,)\n+ Rewards estimated by KernelizedSNIPW for each round.\n+\n+ \"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action_by_behavior_policy, np.ndarray):\n+ raise ValueError(\"action_by_behavior_policy must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ kernel_func = kernel_functions[self.kernel]\n+ u = action_by_evaluation_policy - action_by_behavior_policy\n+ u /= self.bandwidth\n+ estimated_rewards = kernel_func(u) * reward / pscore\n+ estimated_rewards /= (kernel_func(u) / pscore).sum() / reward.shape[0]\n+ return estimated_rewards\n+\n+\n+@dataclass\n+class KernelizedDoublyRobust(BaseOffPolicyEstimatorForContinuousAction):\n+ \"\"\"Kernelized Doubly Robust Estimator.\n+\n+ Note\n+ -------\n+ Kernelized Doubly Robust (KernelizedDR) estimates the policy value of a given (deterministic) evaluation policy :math:`\\\\pi_e` by\n+\n+ .. math::\n+\n+ \\\\hat{V}_{\\\\mathrm{Kernel-DR}} (\\\\pi_e; \\\\mathcal{D})\n+ := \\\\mathbb{E}_{\\\\mathcal{D}} \\\\left[ \\\\frac{1}{h} K \\\\left( \\\\frac{\\pi_e(x_t) - a_t}{h} \\\\right) \\\\frac{(r_t - \\\\hat{q}(x_t, \\\\pi_e(x_t)))}{q_t} + \\\\hat{q}(x_t, \\\\pi_e(x_t)) \\\\right],\n+\n+ where :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by a behavior policy.\n+ Note that each action :math:`a_t` in the logged bandit data is a continuous variable.\n+ :math:`q_t` is a generalized propensity score that is defined as the conditional probability density of the behavior policy.\n+ :math:`K(\\cdot)` is a kernel function such as the gaussian kernel, and :math:`h` is a bandwidth hyperparameter.\n+ :math:`\\\\pi_e (x)` is a deterministic evaluation policy that maps :math:`x` to a continuous action value.\n+ :math:`\\\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.\n+ :math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n+\n+ Parameters\n+ ------------\n+ kernel: str\n+ Choice of kernel function.\n+ Must be one of \"gaussian\", \"epanechnikov\", and \"triangular\".\n+\n+ bandwidth: float\n+ A bandwidth hyperparameter.\n+ A larger value increases bias instead of reducing variance.\n+ A smaller value increases variance instead of reducing bias.\n+\n+ estimator_name: str, default='kernelized_dr'.\n+ Name of off-policy estimator.\n+\n+ References\n+ ------------\n+ Nathan Kallus and Angela Zhou.\n+ \"Policy Evaluation and Optimization with Continuous Treatments\", 2018.\n+\n+ \"\"\"\n+\n+ kernel: str\n+ bandwidth: float\n+ estimator_name: str = \"kernelized_dr\"\n+\n+ def __post_init__(self) -> None:\n+ if self.kernel not in [\"gaussian\", \"epanechnikov\", \"triangular\", \"cosine\"]:\n+ raise ValueError(\n+ f\"kernel must be one of 'gaussian', 'epanechnikov', 'triangular', and 'cosine' but {self.kernel} is given\"\n+ )\n+ check_scalar(\n+ self.bandwidth, name=\"bandwidth\", target_type=(int, float), min_val=0\n+ )\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate rewards for each 
round.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds,)\n+ Expected rewards given context and action estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (n_rounds,)\n+ Rewards estimated by KernelizedDR for each round.\n+\n+ \"\"\"\n+ kernel_func = kernel_functions[self.kernel]\n+ u = action_by_evaluation_policy - action_by_behavior_policy\n+ u /= self.bandwidth\n+ estimated_rewards = (\n+ kernel_func(u) * (reward - estimated_rewards_by_reg_model) / pscore\n+ )\n+ estimated_rewards /= self.bandwidth\n+ estimated_rewards += estimated_rewards_by_reg_model\n+ return estimated_rewards\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds,)\n+ Expected rewards given context and action estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ Returns\n+ ----------\n+ V_hat: float\n+ Estimated policy value (performance) of a given evaluation policy.\n+\n+ \"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action_by_behavior_policy, np.ndarray):\n+ raise ValueError(\"action_by_behavior_policy must be ndarray\")\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n+ raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_continuous_ope_inputs(\n+ reward=reward,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+ return self._estimate_round_rewards(\n+ reward=reward,\n+ 
action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ).mean()\n+\n+ def estimate_interval(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,)\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds,)\n+ Expected rewards given context and action estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ alpha: float, default=0.05\n+ Significant level of confidence intervals.\n+\n+ n_bootstrap_samples: int, default=10000\n+ Number of resampling performed in the bootstrap procedure.\n+\n+ random_state: int, default=None\n+ Controls the random seed in bootstrap sampling.\n+\n+ Returns\n+ ----------\n+ estimated_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ if not isinstance(reward, np.ndarray):\n+ raise ValueError(\"reward must be ndarray\")\n+ if not isinstance(action_by_behavior_policy, np.ndarray):\n+ raise ValueError(\"action_by_behavior_policy must be ndarray\")\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray):\n+ raise ValueError(\"estimated_rewards_by_reg_model must be ndarray\")\n+ if not isinstance(pscore, np.ndarray):\n+ raise ValueError(\"pscore must be ndarray\")\n+\n+ check_continuous_ope_inputs(\n+ reward=reward,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+ estimated_round_rewards = self._estimate_round_rewards(\n+ reward=reward,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ pscore=pscore,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+ return estimate_confidence_interval_by_bootstrap(\n+ samples=estimated_round_rewards,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement continuous ope estimators |
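A minimal usage sketch of the kernelized IPW estimator introduced in this commit. It assumes `KernelizedInverseProbabilityWeighting` is importable from `obp.ope` (the import path used by the test commit that follows); the logged data below are made-up toy arrays, not part of the source.

```python
import numpy as np
from obp.ope import KernelizedInverseProbabilityWeighting

rng = np.random.default_rng(12345)
n_rounds = 1000
# hypothetical logged bandit feedback with continuous actions
action_by_behavior_policy = rng.uniform(-1.0, 1.0, size=n_rounds)  # a_t ~ Uniform(-1, 1)
pscore = np.full(n_rounds, 0.5)  # density of Uniform(-1, 1), i.e., the generalized propensity score
reward = 1.0 - np.abs(action_by_behavior_policy) + rng.normal(scale=0.1, size=n_rounds)
action_by_evaluation_policy = np.zeros(n_rounds)  # deterministic evaluation policy pi_e(x) = 0

kernel_ipw = KernelizedInverseProbabilityWeighting(kernel="gaussian", bandwidth=0.3)
estimated_policy_value = kernel_ipw.estimate_policy_value(
    reward=reward,
    action_by_behavior_policy=action_by_behavior_policy,
    pscore=pscore,
    action_by_evaluation_policy=action_by_evaluation_policy,
)
print(estimated_policy_value)  # estimated value of always playing a = 0
```

A smaller `bandwidth` keeps only rounds whose logged action lies close to the evaluation policy's action (lower bias, higher variance), while a larger one averages over more rounds (higher bias, lower variance).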
641,014 | 06.07.2021 23:09:03 | -32,400 | bf2eaf96bde790d4e46fa0240b526eb8ad8294ee | add tests of continuous ope estimators | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_dr_estimators_continuous.py",
"diff": "+import pytest\n+import numpy as np\n+\n+from obp.ope import KernelizedDoublyRobust\n+from obp.dataset import (\n+ SyntheticContinuousBanditDataset,\n+ linear_reward_funcion_continuous,\n+ linear_behavior_policy_continuous,\n+ linear_synthetic_policy_continuous,\n+)\n+\n+\n+def test_synthetic_init():\n+ # kernel\n+ with pytest.raises(ValueError):\n+ KernelizedDoublyRobust(kernel=\"a\", bandwidth=0.1)\n+\n+ with pytest.raises(ValueError):\n+ KernelizedDoublyRobust(kernel=None, bandwidth=0.1)\n+\n+ # bandwidth\n+ with pytest.raises(TypeError):\n+ KernelizedDoublyRobust(kernel=\"gaussian\", bandwidth=\"a\")\n+\n+ with pytest.raises(TypeError):\n+ KernelizedDoublyRobust(kernel=\"gaussian\", bandwidth=None)\n+\n+ with pytest.raises(ValueError):\n+ KernelizedDoublyRobust(kernel=\"gaussian\", bandwidth=-1.0)\n+\n+\n+# prepare dr instances\n+dr = KernelizedDoublyRobust(kernel=\"cosine\", bandwidth=0.1)\n+\n+# --- invalid inputs (all kernelized estimators) ---\n+\n+# action_by_evaluation_policy, estimated_rewards_by_reg_model, action_by_behavior_policy, reward, pscore, description\n+invalid_input_of_dr = [\n+ (\n+ None, #\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_evaluation_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 1)), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_evaluation_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ None, #\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"estimated_rewards_by_reg_model must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones((5, 1)), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"estimated_rewards_by_reg_model must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5), #\n+ np.ones(4), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"estimated_rewards_by_reg_model and action_by_evaluation_policy must be the same size\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ None, #\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones((5, 1)), #\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ None, #\n+ np.random.uniform(size=5),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones((5, 1)), #\n+ np.random.uniform(size=5),\n+ \"reward must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(4), #\n+ np.ones(3), #\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy and reward must be the same size\",\n+ ),\n+ (\n+ np.ones(5), #\n+ np.ones(5),\n+ np.ones(4), #\n+ np.ones(4),\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy and action_by_evaluation_policy must be the same size\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ None, #\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=(5, 1)), #\n+ \"pscore must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=4), #\n+ \"action_by_behavior_policy, reward, and pscore must be the same size\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.arange(5), #\n+ \"pscore must be positive\",\n+ ),\n+]\n+\n+\[email protected](\n+ 
\"action_by_evaluation_policy, estimated_rewards_by_reg_model, action_by_behavior_policy, reward, pscore, description\",\n+ invalid_input_of_dr,\n+)\n+def test_dr_continuous_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ description: str,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dr.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dr.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+\n+# --- valid inputs (all kernelized estimators) ---\n+\n+# action_by_evaluation_policy, estimated_rewards_by_reg_model, action_by_behavior_policy, reward, pscore\n+valid_input_of_dr = [\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, action_by_behavior_policy, reward, pscore\",\n+ valid_input_of_dr,\n+)\n+def test_dr_continuous_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+) -> None:\n+ _ = dr.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+\n+# --- confidence intervals ---\n+# alpha, n_bootstrap_samples, random_state, description\n+invalid_input_of_estimate_intervals = [\n+ (0.05, 100, \"s\", \"random_state must be an integer\"),\n+ (0.05, -1, 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.05, \"s\", 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (1.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (\"0\", 1, 1, \"alpha must be a positive float (< 1)\"),\n+]\n+\n+valid_input_of_estimate_intervals = [\n+ (0.05, 100, 1, \"random_state is 1\"),\n+ (0.05, 1, 1, \"n_bootstrap_samples is 1\"),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, action_by_behavior_policy, reward, pscore\",\n+ valid_input_of_dr,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description\",\n+ invalid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = dr.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ 
\"action_by_evaluation_policy, estimated_rewards_by_reg_model, action_by_behavior_policy, reward, pscore\",\n+ valid_input_of_dr,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description,\n+) -> None:\n+ _ = dr.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"kernel\",\n+ [\"triangular\", \"gaussian\", \"epanechnikov\", \"cosine\"],\n+)\n+def test_continuous_ope_performance(kernel):\n+ # define dr instances\n+ dr = KernelizedDoublyRobust(kernel=kernel, bandwidth=0.1)\n+ # set parameters\n+ dim_context = 2\n+ reward_noise = 0.1\n+ random_state = 12345\n+ n_rounds = 10000\n+ min_action_value = -10\n+ max_action_value = 10\n+ behavior_policy_function = linear_behavior_policy_continuous\n+ reward_function = linear_reward_funcion_continuous\n+ dataset = SyntheticContinuousBanditDataset(\n+ dim_context=dim_context,\n+ reward_function=reward_function,\n+ behavior_policy_function=behavior_policy_function,\n+ random_state=random_state,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+ context = bandit_feedback[\"context\"]\n+ action_by_evaluation_policy = linear_synthetic_policy_continuous(context)\n+ action_by_behavior_policy = bandit_feedback[\"action\"]\n+ reward = bandit_feedback[\"reward\"]\n+ pscore = bandit_feedback[\"pscore\"]\n+\n+ # compute statistics of ground truth policy value\n+ q_pi_e = linear_reward_funcion_continuous(\n+ context=context, action=action_by_evaluation_policy, random_state=random_state\n+ )\n+ true_policy_value = q_pi_e.mean()\n+ print(f\"true_policy_value: {true_policy_value}\")\n+\n+ # OPE\n+ policy_value_estimated_by_dr = dr.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=q_pi_e,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+ # check the performance of OPE\n+ estimated_policy_value = {\n+ \"dr\": policy_value_estimated_by_dr,\n+ }\n+ for key in estimated_policy_value:\n+ print(\n+ f\"estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, \"\n+ )\n+ # test the performance of each estimator\n+ assert (\n+ np.abs(true_policy_value - estimated_policy_value[key]) / true_policy_value\n+ <= 0.1\n+ ), f\"{key} does not work well (relative estimation error is greater than 10%)\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_ipw_estimators_continuous.py",
"diff": "+import pytest\n+import numpy as np\n+\n+from obp.ope import (\n+ KernelizedInverseProbabilityWeighting,\n+ KernelizedSelfNormalizedInverseProbabilityWeighting,\n+)\n+from obp.dataset import (\n+ SyntheticContinuousBanditDataset,\n+ linear_reward_funcion_continuous,\n+ linear_behavior_policy_continuous,\n+ linear_synthetic_policy_continuous,\n+)\n+\n+\n+def test_synthetic_init():\n+ # kernel\n+ with pytest.raises(ValueError):\n+ KernelizedInverseProbabilityWeighting(kernel=\"a\", bandwidth=0.1)\n+\n+ with pytest.raises(ValueError):\n+ KernelizedInverseProbabilityWeighting(kernel=None, bandwidth=0.1)\n+\n+ with pytest.raises(ValueError):\n+ KernelizedSelfNormalizedInverseProbabilityWeighting(kernel=\"a\", bandwidth=0.1)\n+\n+ with pytest.raises(ValueError):\n+ KernelizedSelfNormalizedInverseProbabilityWeighting(kernel=None, bandwidth=0.1)\n+\n+ # bandwidth\n+ with pytest.raises(TypeError):\n+ KernelizedInverseProbabilityWeighting(kernel=\"gaussian\", bandwidth=\"a\")\n+\n+ with pytest.raises(TypeError):\n+ KernelizedInverseProbabilityWeighting(kernel=\"gaussian\", bandwidth=None)\n+\n+ with pytest.raises(ValueError):\n+ KernelizedInverseProbabilityWeighting(kernel=\"gaussian\", bandwidth=-1.0)\n+\n+ with pytest.raises(TypeError):\n+ KernelizedSelfNormalizedInverseProbabilityWeighting(\n+ kernel=\"gaussian\", bandwidth=\"a\"\n+ )\n+\n+ with pytest.raises(TypeError):\n+ KernelizedSelfNormalizedInverseProbabilityWeighting(\n+ kernel=\"gaussian\", bandwidth=None\n+ )\n+\n+ with pytest.raises(ValueError):\n+ KernelizedSelfNormalizedInverseProbabilityWeighting(\n+ kernel=\"gaussian\", bandwidth=-1.0\n+ )\n+\n+\n+# prepare ipw instances\n+ipw = KernelizedInverseProbabilityWeighting(kernel=\"cosine\", bandwidth=0.1)\n+snipw = KernelizedSelfNormalizedInverseProbabilityWeighting(\n+ kernel=\"epanechnikov\", bandwidth=0.1\n+)\n+\n+# --- invalid inputs (all kernelized estimators) ---\n+\n+# action_by_evaluation_policy, action_by_behavior_policy, reward, pscore, description\n+invalid_input_of_ipw = [\n+ (\n+ None, #\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_evaluation_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 1)), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_evaluation_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ None, #\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones((5, 1)), #\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ None, #\n+ np.random.uniform(size=5),\n+ \"reward must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones((5, 1)), #\n+ np.random.uniform(size=5),\n+ \"reward must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(4), #\n+ np.ones(3), #\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy and reward must be the same size\",\n+ ),\n+ (\n+ np.ones(4), #\n+ np.ones(5), #\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ \"action_by_behavior_policy and action_by_evaluation_policy must be the same size\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ None, #\n+ \"pscore must be ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=(5, 1)), #\n+ \"pscore must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=4), #\n+ 
\"action_by_behavior_policy, reward, and pscore must be the same size\",\n+ ),\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.arange(5), #\n+ \"pscore must be positive\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, action_by_behavior_policy, reward, pscore, description\",\n+ invalid_input_of_ipw,\n+)\n+def test_ipw_continuous_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ description: str,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ipw.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = snipw.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = snipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+\n+# --- valid inputs (all kernelized estimators) ---\n+\n+# action_by_evaluation_policy, action_by_behavior_policy, reward, pscore\n+valid_input_of_ipw = [\n+ (\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5),\n+ np.random.uniform(size=5),\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, action_by_behavior_policy, reward, pscore\",\n+ valid_input_of_ipw,\n+)\n+def test_ipw_continuous_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+) -> None:\n+ _ = ipw.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ _ = ipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ _ = snipw.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ _ = snipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+\n+# --- confidence intervals ---\n+# alpha, n_bootstrap_samples, random_state, description\n+invalid_input_of_estimate_intervals = [\n+ (0.05, 100, \"s\", \"random_state must be an integer\"),\n+ (0.05, -1, 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.05, \"s\", 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (1.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (\"0\", 1, 1, \"alpha must be a positive float (< 1)\"),\n+]\n+\n+valid_input_of_estimate_intervals = [\n+ (0.05, 100, 1, \"random_state is 1\"),\n+ (0.05, 1, 1, \"n_bootstrap_samples is 1\"),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, action_by_behavior_policy, 
reward, pscore\",\n+ valid_input_of_ipw,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description\",\n+ invalid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description,\n+) -> None:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = snipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, action_by_behavior_policy, reward, pscore\",\n+ valid_input_of_ipw,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_estimate_intervals_of_all_estimators_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description,\n+) -> None:\n+ _ = ipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = snipw.estimate_interval(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"kernel\",\n+ [\"triangular\", \"gaussian\", \"epanechnikov\", \"cosine\"],\n+)\n+def test_continuous_ope_performance(kernel):\n+ # define ipw instances\n+ ipw = KernelizedInverseProbabilityWeighting(kernel=kernel, bandwidth=0.1)\n+ snipw = KernelizedSelfNormalizedInverseProbabilityWeighting(\n+ kernel=kernel, bandwidth=0.1\n+ )\n+ # set parameters\n+ dim_context = 2\n+ reward_noise = 0.1\n+ random_state = 12345\n+ n_rounds = 10000\n+ min_action_value = -10\n+ max_action_value = 10\n+ behavior_policy_function = linear_behavior_policy_continuous\n+ reward_function = linear_reward_funcion_continuous\n+ dataset = SyntheticContinuousBanditDataset(\n+ dim_context=dim_context,\n+ reward_function=reward_function,\n+ behavior_policy_function=behavior_policy_function,\n+ random_state=random_state,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+ context = bandit_feedback[\"context\"]\n+ action_by_evaluation_policy = linear_synthetic_policy_continuous(context)\n+ action_by_behavior_policy = bandit_feedback[\"action\"]\n+ reward = bandit_feedback[\"reward\"]\n+ pscore = bandit_feedback[\"pscore\"]\n+\n+ # compute statistics of ground truth policy value\n+ q_pi_e = linear_reward_funcion_continuous(\n+ context=context, action=action_by_evaluation_policy, 
random_state=random_state\n+ )\n+ true_policy_value = q_pi_e.mean()\n+ print(f\"true_policy_value: {true_policy_value}\")\n+\n+ # OPE\n+ policy_value_estimated_by_ipw = ipw.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+ policy_value_estimated_by_snipw = snipw.estimate_policy_value(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+ # check the performance of OPE\n+ estimated_policy_value = {\n+ \"ipw\": policy_value_estimated_by_ipw,\n+ \"snipw\": policy_value_estimated_by_snipw,\n+ }\n+ for key in estimated_policy_value:\n+ print(\n+ f\"estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, \"\n+ )\n+ # test the performance of each estimator\n+ assert (\n+ np.abs(true_policy_value - estimated_policy_value[key]) / true_policy_value\n+ <= 0.1\n+ ), f\"{key} does not work well (relative estimation error is greater than 10%)\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_kernel_functions.py",
"diff": "+import numpy as np\n+from scipy import integrate\n+\n+from obp.ope import (\n+ triangular_kernel,\n+ epanechnikov_kernel,\n+ gaussian_kernel,\n+ cosine_kernel,\n+)\n+\n+\n+def test_kernel_functions():\n+ # triangular\n+ assert np.isclose(\n+ integrate.quad(lambda x: triangular_kernel(x), -np.inf, np.inf)[0], 1\n+ )\n+ assert np.isclose(\n+ integrate.quad(lambda x: x * triangular_kernel(x), -np.inf, np.inf)[0], 0\n+ )\n+ assert integrate.quad(lambda x: triangular_kernel(x) ** 2, -np.inf, np.inf)[0] > 0\n+\n+ # epanechnikov\n+ assert np.isclose(\n+ integrate.quad(lambda x: epanechnikov_kernel(x), -np.inf, np.inf)[0], 1\n+ )\n+ assert np.isclose(\n+ integrate.quad(lambda x: x * epanechnikov_kernel(x), -np.inf, np.inf)[0], 0\n+ )\n+ assert integrate.quad(lambda x: epanechnikov_kernel(x) ** 2, -np.inf, np.inf)[0] > 0\n+\n+ # gaussian\n+ assert np.isclose(\n+ integrate.quad(lambda x: gaussian_kernel(x), -np.inf, np.inf)[0], 1\n+ )\n+ assert np.isclose(\n+ integrate.quad(lambda x: x * gaussian_kernel(x), -np.inf, np.inf)[0], 0\n+ )\n+ assert integrate.quad(lambda x: gaussian_kernel(x) ** 2, -np.inf, np.inf)[0] > 0\n+\n+ # cosine\n+ assert np.isclose(integrate.quad(lambda x: cosine_kernel(x), -np.inf, np.inf)[0], 1)\n+ assert np.isclose(\n+ integrate.quad(lambda x: x * cosine_kernel(x), -np.inf, np.inf)[0], 0\n+ )\n+ assert integrate.quad(lambda x: cosine_kernel(x) ** 2, -np.inf, np.inf)[0] > 0\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests of continuous ope estimators |
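A side note on the kernel property checks in `test_kernel_functions.py` above: the estimators divide by the bandwidth because the scaled kernel `K(u / h) / h` must still integrate to one, which is what lets the kernelized weights approximate the evaluation policy's reward as `h` shrinks. The short sketch below verifies that property numerically; it assumes the kernel functions are exported from `obp.ope` exactly as in the test imports, and the bandwidth values are arbitrary.

```python
import numpy as np
from scipy import integrate

from obp.ope import gaussian_kernel, triangular_kernel

# the zeroth moment of the bandwidth-scaled kernel should stay equal to one for any h > 0
for kernel in (gaussian_kernel, triangular_kernel):
    for h in (0.5, 1.0, 2.0):
        mass = integrate.quad(lambda u: kernel(u / h) / h, -np.inf, np.inf)[0]
        assert np.isclose(mass, 1.0), f"{kernel.__name__} with h={h}: mass={mass}"
```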
641,014 | 06.07.2021 23:09:21 | -32,400 | ea3b2f915bacfd8dc91155a12ce702f784d924fb | add some check funcs for continuous ope | [
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -331,6 +331,150 @@ def check_ope_inputs(\nraise ValueError(\"pscore must be positive\")\n+def check_continuous_bandit_feedback_inputs(\n+ context: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ reward: np.ndarray,\n+ expected_reward: Optional[np.ndarray] = None,\n+ pscore: Optional[np.ndarray] = None,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for bandit learning or simulation with continuous actions.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors in each round, i.e., :math:`x_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ expected_reward: array-like, shape (n_rounds, n_actions), default=None\n+ Expected rewards (or outcome) in each round, i.e., :math:`\\\\mathbb{E}[r_t]`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+ if (\n+ not isinstance(action_by_behavior_policy, np.ndarray)\n+ or action_by_behavior_policy.ndim != 1\n+ ):\n+ raise ValueError(\"action_by_behavior_policy must be 1-dimensional ndarray\")\n+ if not isinstance(reward, np.ndarray) or reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional ndarray\")\n+\n+ if expected_reward is not None:\n+ if not isinstance(expected_reward, np.ndarray) or expected_reward.ndim != 1:\n+ raise ValueError(\"expected_reward must be 1-dimensional ndarray\")\n+ if not (\n+ context.shape[0]\n+ == action_by_behavior_policy.shape[0]\n+ == reward.shape[0]\n+ == expected_reward.shape[0]\n+ ):\n+ raise ValueError(\n+ \"context, action, reward, and expected_reward must be the same size.\"\n+ )\n+ if pscore is not None:\n+ if not isinstance(pscore, np.ndarray) or pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional ndarray\")\n+ if not (\n+ context.shape[0]\n+ == action_by_behavior_policy.shape[0]\n+ == reward.shape[0]\n+ == pscore.shape[0]\n+ ):\n+ raise ValueError(\n+ \"context, action, reward, and pscore must be the same size.\"\n+ )\n+ if np.any(pscore <= 0):\n+ raise ValueError(\"pscore must be positive\")\n+\n+\n+def check_continuous_ope_inputs(\n+ action_by_evaluation_policy: np.ndarray,\n+ action_by_behavior_policy: Optional[np.ndarray] = None,\n+ reward: Optional[np.ndarray] = None,\n+ pscore: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for OPE with continuous actions.\n+\n+ Parameters\n+ -----------\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,), default=None\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ reward: array-like, shape (n_rounds,), default=None\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Probability densities of the 
continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds,), default=None\n+ Expected rewards given context and action estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ \"\"\"\n+ # action_by_evaluation_policy\n+ if (\n+ not isinstance(action_by_evaluation_policy, np.ndarray)\n+ or action_by_evaluation_policy.ndim != 1\n+ ):\n+ raise ValueError(\"action_by_evaluation_policy must be 1-dimensional ndarray\")\n+\n+ # estimated_rewards_by_reg_model\n+ if estimated_rewards_by_reg_model is not None:\n+ if not isinstance(estimated_rewards_by_reg_model, np.ndarray) or estimated_rewards_by_reg_model.ndim != 1:\n+ raise ValueError(\"estimated_rewards_by_reg_model must be 1-dimensional ndarray\")\n+ if estimated_rewards_by_reg_model.shape[0] != action_by_evaluation_policy.shape[0]:\n+ raise ValueError(\n+ \"estimated_rewards_by_reg_model and action_by_evaluation_policy must be the same size\"\n+ )\n+\n+ # action, reward\n+ if action_by_behavior_policy is not None or reward is not None:\n+ if (\n+ not isinstance(action_by_behavior_policy, np.ndarray)\n+ or action_by_behavior_policy.ndim != 1\n+ ):\n+ raise ValueError(\"action_by_behavior_policy must be 1-dimensional ndarray\")\n+ if not isinstance(reward, np.ndarray) or reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional ndarray\")\n+ if not (action_by_behavior_policy.shape[0] == reward.shape[0]):\n+ raise ValueError(\n+ \"action_by_behavior_policy and reward must be the same size\"\n+ )\n+ if not (\n+ action_by_behavior_policy.shape[0] == action_by_evaluation_policy.shape[0]\n+ ):\n+ raise ValueError(\n+ \"action_by_behavior_policy and action_by_evaluation_policy must be the same size\"\n+ )\n+\n+ # pscore\n+ if pscore is not None:\n+ if not isinstance(pscore, np.ndarray) or pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional ndarray\")\n+ if not (\n+ action_by_behavior_policy.shape[0] == reward.shape[0] == pscore.shape[0]\n+ ):\n+ raise ValueError(\n+ \"action_by_behavior_policy, reward, and pscore must be the same size\"\n+ )\n+ if np.any(pscore <= 0):\n+ raise ValueError(\"pscore must be positive\")\n+\n+\ndef _check_slate_ope_inputs(\nslate_id: np.ndarray,\nreward: np.ndarray,\n@@ -342,6 +486,7 @@ def _check_slate_ope_inputs(\n\"\"\"Check inputs of Slate OPE estimators.\nParameters\n+ -----------\nslate_id: array-like, shape (<= n_rounds * len_list,)\nSlate id observed in each round of the logged bandit feedback.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some check funcs for continuous ope |
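A brief sketch of how the new validation helper behaves, assuming `check_continuous_ope_inputs` is importable from `obp.utils` as added in the diff above; the arrays are placeholder values only.

```python
import numpy as np
from obp.utils import check_continuous_ope_inputs

n_rounds = 5
# well-formed inputs: 1-dimensional arrays of equal length with positive pscore
check_continuous_ope_inputs(
    action_by_evaluation_policy=np.zeros(n_rounds),
    action_by_behavior_policy=np.ones(n_rounds),
    reward=np.ones(n_rounds),
    pscore=np.full(n_rounds, 0.5),
)  # passes silently

# a 2-dimensional evaluation-policy action array is rejected
try:
    check_continuous_ope_inputs(
        action_by_evaluation_policy=np.zeros((n_rounds, 1)),
        action_by_behavior_policy=np.ones(n_rounds),
        reward=np.ones(n_rounds),
        pscore=np.full(n_rounds, 0.5),
    )
except ValueError as err:
    print(err)  # "action_by_evaluation_policy must be 1-dimensional ndarray"
```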
641,014 | 07.07.2021 10:18:30 | -32,400 | ce63f55866717e0155faf190336ae4f99176e3ad | add tests of meta_continuous | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_meta_continuous.py",
"diff": "+from typing import Dict, Optional\n+from dataclasses import dataclass\n+import itertools\n+from copy import deepcopy\n+\n+import pytest\n+import numpy as np\n+import pandas as pd\n+from pandas.testing import assert_frame_equal\n+\n+from obp.types import BanditFeedback\n+from obp.ope import (\n+ ContinuousOffPolicyEvaluation,\n+ BaseContinuousOffPolicyEstimator,\n+)\n+from obp.utils import check_confidence_interval_arguments\n+\n+\n+mock_policy_value = 0.5\n+mock_confidence_interval = {\n+ \"mean\": 0.5,\n+ \"95.0% CI (lower)\": 0.3,\n+ \"95.0% CI (upper)\": 0.7,\n+}\n+\n+\n+@dataclass\n+class KernelizedInverseProbabilityWeightingMock(BaseContinuousOffPolicyEstimator):\n+ \"\"\"Mock Kernelized Inverse Probability Weighting.\"\"\"\n+\n+ eps: float = 0.1\n+ estimator_name: str = \"ipw\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ return np.ones_like(reward)\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ **kwargs,\n+ ) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Returns\n+ ----------\n+ mock_policy_value: float\n+\n+ \"\"\"\n+ return mock_policy_value + self.eps\n+\n+ def estimate_interval(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Returns\n+ ----------\n+ mock_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ check_confidence_interval_arguments(\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ return {k: v + self.eps for k, v in mock_confidence_interval.items()}\n+\n+\n+@dataclass\n+class KernelizedDoublyRobustMock(BaseContinuousOffPolicyEstimator):\n+ \"\"\"Mock Kernelized Doubly Robust.\"\"\"\n+\n+ estimator_name: str = \"kernelized_dr\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ return np.ones_like(reward)\n+\n+ def estimate_policy_value(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ **kwargs,\n+ ) -> float:\n+ \"\"\"Estimate policy value of an evaluation policy.\n+\n+ Returns\n+ ----------\n+ mock_policy_value: float\n+\n+ \"\"\"\n+ return mock_policy_value\n+\n+ def estimate_interval(\n+ self,\n+ reward: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ pscore: np.ndarray,\n+ action_by_evaluation_policy: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ alpha: float = 0.05,\n+ n_bootstrap_samples: int = 10000,\n+ random_state: Optional[int] = None,\n+ **kwargs,\n+ ) -> Dict[str, float]:\n+ \"\"\"Estimate confidence interval of policy value by nonparametric bootstrap procedure.\n+\n+ Returns\n+ ----------\n+ 
mock_confidence_interval: Dict[str, float]\n+ Dictionary storing the estimated mean and upper-lower confidence bounds.\n+\n+ \"\"\"\n+ check_confidence_interval_arguments(\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ return {k: v for k, v in mock_confidence_interval.items()}\n+\n+\n+# define Mock instances\n+ipw = KernelizedInverseProbabilityWeightingMock()\n+ipw2 = KernelizedInverseProbabilityWeightingMock(eps=0.02)\n+ipw3 = KernelizedInverseProbabilityWeightingMock(estimator_name=\"ipw3\")\n+dr = KernelizedDoublyRobustMock(estimator_name=\"dr\")\n+\n+\n+def test_meta_post_init(synthetic_continuous_bandit_feedback: BanditFeedback) -> None:\n+ \"\"\"\n+ Test the __post_init__ function\n+ \"\"\"\n+ # __post_init__ saves the latter estimator when the same estimator name is used\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw, ipw2]\n+ )\n+ assert ope_.ope_estimators_ == {\"ipw\": ipw2}, \"__post_init__ returns a wrong value\"\n+ # __post_init__ can handle the same estimator if the estimator names are different\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw, ipw3]\n+ )\n+ assert ope_.ope_estimators_ == {\n+ \"ipw\": ipw,\n+ \"ipw3\": ipw3,\n+ }, \"__post_init__ returns a wrong value\"\n+ # __post__init__ raises RuntimeError when necessary_keys are not included in the bandit_feedback\n+ necessary_keys = [\"action_by_behavior_policy\", \"reward\", \"pscore\"]\n+ for i in range(len(necessary_keys)):\n+ for deleted_keys in itertools.combinations(necessary_keys, i + 1):\n+ invalid_bandit_feedback_dict = {key: \"_\" for key in necessary_keys}\n+ # delete\n+ for k in deleted_keys:\n+ del invalid_bandit_feedback_dict[k]\n+ with pytest.raises(RuntimeError, match=r\"Missing key*\"):\n+ _ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=invalid_bandit_feedback_dict, ope_estimators=[ipw]\n+ )\n+\n+\n+# action_by_evaluation_policy, estimated_rewards_by_reg_model, description\n+invalid_input_of_create_estimator_inputs = [\n+ (\n+ np.zeros(5), #\n+ np.zeros(4), #\n+ \"estimated_rewards_by_reg_model.shape and action_by_evaluation_policy.shape must be the same\",\n+ ),\n+ (\n+ np.zeros(5),\n+ {\"dr\": np.zeros(4)},\n+ r\"estimated_rewards_by_reg_model\\[dr\\].shape and action_by_evaluation_policy.shape must be the same\",\n+ ),\n+ (\n+ np.zeros(5),\n+ {\"dr\": None},\n+ r\"estimated_rewards_by_reg_model\\[dr\\] must be ndarray\",\n+ ),\n+ (\n+ np.zeros((2, 3)),\n+ None,\n+ \"action_by_evaluation_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\"3\", None, \"action_by_evaluation_policy must be 1-dimensional ndarray\"),\n+ (None, None, \"action_by_evaluation_policy must be 1-dimensional ndarray\"),\n+]\n+\n+valid_input_of_create_estimator_inputs = [\n+ (\n+ np.zeros(5),\n+ np.zeros(5),\n+ \"same shape\",\n+ ),\n+ (\n+ np.zeros(5),\n+ {\"dr\": np.zeros(5)},\n+ \"same shape\",\n+ ),\n+ (np.zeros(5), None, \"estimated_rewards_by_reg_model is None\"),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description\",\n+ invalid_input_of_create_estimator_inputs,\n+)\n+def test_meta_create_estimator_inputs_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the _create_estimator_inputs using valid data\n+ \"\"\"\n+ ope_ = 
ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw]\n+ )\n+ # raise ValueError when the shape of two arrays are different\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_._create_estimator_inputs(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ # _create_estimator_inputs function is called in the following functions\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.estimate_policy_values(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.estimate_intervals(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.summarize_off_policy_estimates(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=0.1,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=0.1,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description\",\n+ valid_input_of_create_estimator_inputs,\n+)\n+def test_meta_create_estimator_inputs_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the _create_estimator_inputs using invalid data\n+ \"\"\"\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw]\n+ )\n+ estimator_inputs = ope_._create_estimator_inputs(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ assert set(estimator_inputs.keys()) == set([\"ipw\"])\n+ assert set(estimator_inputs[\"ipw\"].keys()) == set(\n+ [\n+ \"reward\",\n+ \"action_by_behavior_policy\",\n+ \"pscore\",\n+ \"action_by_evaluation_policy\",\n+ \"estimated_rewards_by_reg_model\",\n+ ]\n+ ), f\"Invalid response of _create_estimator_inputs (test case: {description})\"\n+ # _create_estimator_inputs function is called in the following functions\n+ _ = ope_.estimate_policy_values(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.estimate_intervals(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.summarize_off_policy_estimates(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=0.1,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ 
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=0.1,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description\",\n+ valid_input_of_create_estimator_inputs,\n+)\n+def test_meta_estimate_policy_values_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_policy_values using valid data\n+ \"\"\"\n+ # single ope estimator\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[dr]\n+ )\n+ assert ope_.estimate_policy_values(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ) == {\n+ \"dr\": mock_policy_value\n+ }, \"OffPolicyEvaluation.estimate_policy_values ([DoublyRobust]) returns a wrong value\"\n+ # multiple ope estimators\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[dr, ipw]\n+ )\n+ assert ope_.estimate_policy_values(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ ) == {\n+ \"dr\": mock_policy_value,\n+ \"ipw\": mock_policy_value + ipw.eps,\n+ }, \"OffPolicyEvaluation.estimate_policy_values ([DoublyRobust, IPW]) returns a wrong value\"\n+\n+\n+# alpha, n_bootstrap_samples, random_state, description\n+invalid_input_of_estimate_intervals = [\n+ (0.05, 100, \"s\", \"random_state must be an integer\"),\n+ (0.05, -1, 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.05, \"s\", 1, \"n_bootstrap_samples must be a positive integer\"),\n+ (0.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (1.0, 1, 1, \"alpha must be a positive float (< 1)\"),\n+ (\"0\", 1, 1, \"alpha must be a positive float (< 1)\"),\n+]\n+\n+valid_input_of_estimate_intervals = [\n+ (0.05, 100, 1, \"random_state is 1\"),\n+ (0.05, 1, 1, \"n_bootstrap_samples is 1\"),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ invalid_input_of_estimate_intervals,\n+)\n+def test_meta_estimate_intervals_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ alpha,\n+ n_bootstrap_samples,\n+ random_state,\n+ description_2: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_intervals using invalid data\n+ \"\"\"\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[dr]\n+ )\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = ope_.estimate_intervals(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ # estimate_intervals function is called in summarize_off_policy_estimates\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = 
ope_.summarize_off_policy_estimates(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_meta_estimate_intervals_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ alpha: float,\n+ n_bootstrap_samples: int,\n+ random_state: int,\n+ description_2: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of estimate_intervals using valid data\n+ \"\"\"\n+ # single ope estimator\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[dr]\n+ )\n+ assert ope_.estimate_intervals(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ ) == {\n+ \"dr\": mock_confidence_interval\n+ }, \"OffPolicyEvaluation.estimate_intervals ([DoublyRobust]) returns a wrong value\"\n+ # multiple ope estimators\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[dr, ipw]\n+ )\n+ assert ope_.estimate_intervals(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ ) == {\n+ \"dr\": mock_confidence_interval,\n+ \"ipw\": {k: v + ipw.eps for k, v in mock_confidence_interval.items()},\n+ }, \"OffPolicyEvaluation.estimate_intervals ([DoublyRobust, IPW]) returns a wrong value\"\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"alpha, n_bootstrap_samples, random_state, description_2\",\n+ valid_input_of_estimate_intervals,\n+)\n+def test_meta_summarize_off_policy_estimates(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ alpha: float,\n+ n_bootstrap_samples: int,\n+ random_state: int,\n+ description_2: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of summarize_off_policy_estimates using valid data\n+ \"\"\"\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw, ipw3]\n+ )\n+ value, interval = ope_.summarize_off_policy_estimates(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ expected_value = pd.DataFrame(\n+ {\n+ \"ipw\": mock_policy_value + ipw.eps,\n+ \"ipw3\": mock_policy_value + ipw3.eps,\n+ },\n+ index=[\"estimated_policy_value\"],\n+ ).T\n+ expected_value[\"relative_estimated_policy_value\"] = (\n+ expected_value[\"estimated_policy_value\"]\n+ / synthetic_continuous_bandit_feedback[\"reward\"].mean()\n+ )\n+ expected_interval = pd.DataFrame(\n+ 
{\n+ \"ipw\": {k: v + ipw.eps for k, v in mock_confidence_interval.items()},\n+ \"ipw3\": {k: v + ipw3.eps for k, v in mock_confidence_interval.items()},\n+ }\n+ ).T\n+ assert_frame_equal(value, expected_value), \"Invalid summarization (policy value)\"\n+ assert_frame_equal(interval, expected_interval), \"Invalid summarization (interval)\"\n+ # check relative estimated policy value when the average of bandit_feedback[\"reward\"] is zero\n+ zero_reward_bandit_feedback = deepcopy(synthetic_continuous_bandit_feedback)\n+ zero_reward_bandit_feedback[\"reward\"] = np.zeros(\n+ zero_reward_bandit_feedback[\"reward\"].shape[0]\n+ )\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=zero_reward_bandit_feedback, ope_estimators=[ipw, ipw3]\n+ )\n+ value, _ = ope_.summarize_off_policy_estimates(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ expected_value = pd.DataFrame(\n+ {\n+ \"ipw\": mock_policy_value + ipw.eps,\n+ \"ipw3\": mock_policy_value + ipw3.eps,\n+ },\n+ index=[\"estimated_policy_value\"],\n+ ).T\n+ expected_value[\"relative_estimated_policy_value\"] = np.nan\n+ assert_frame_equal(value, expected_value), \"Invalid summarization (policy value)\"\n+\n+\n+invalid_input_of_evaluation_performance_of_estimators = [\n+ (\"foo\", 0.3, \"metric must be either 'relative-ee' or 'se'\"),\n+ (\"se\", 1, \"ground_truth_policy_value must be a float\"),\n+ (\"se\", \"a\", \"ground_truth_policy_value must be a float\"),\n+ (\n+ \"relative-ee\",\n+ 0.0,\n+ \"ground_truth_policy_value must be non-zero when metric is relative-ee\",\n+ ),\n+]\n+\n+valid_input_of_evaluation_performance_of_estimators = [\n+ (\"se\", 0.0, \"metric is se and ground_truth_policy_value is 0.0\"),\n+ (\"relative-ee\", 1.0, \"metric is relative-ee and ground_truth_policy_value is 1.0\"),\n+]\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"metric, ground_truth_policy_value, description_2\",\n+ invalid_input_of_evaluation_performance_of_estimators,\n+)\n+def test_meta_evaluate_performance_of_estimators_using_invalid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ metric,\n+ ground_truth_policy_value,\n+ description_2: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of evaluate_performance_of_estimators using invalid data\n+ \"\"\"\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[dr]\n+ )\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ metric=metric,\n+ )\n+ # estimate_intervals function is called in summarize_off_policy_estimates\n+ with pytest.raises(ValueError, match=f\"{description_2}*\"):\n+ _ = ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ metric=metric,\n+ )\n+\n+\[email protected](\n+ \"action_by_evaluation_policy, 
estimated_rewards_by_reg_model, description_1\",\n+ valid_input_of_create_estimator_inputs,\n+)\[email protected](\n+ \"metric, ground_truth_policy_value, description_2\",\n+ valid_input_of_evaluation_performance_of_estimators,\n+)\n+def test_meta_evaluate_performance_of_estimators_using_valid_input_data(\n+ action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model,\n+ description_1: str,\n+ metric,\n+ ground_truth_policy_value,\n+ description_2: str,\n+ synthetic_continuous_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the response of evaluate_performance_of_estimators using valid data\n+ \"\"\"\n+ if metric == \"relative-ee\":\n+ # calculate relative-ee\n+ eval_metric_ope_dict = {\n+ \"ipw\": np.abs(\n+ (mock_policy_value + ipw.eps - ground_truth_policy_value)\n+ / ground_truth_policy_value\n+ ),\n+ \"ipw3\": np.abs(\n+ (mock_policy_value + ipw3.eps - ground_truth_policy_value)\n+ / ground_truth_policy_value\n+ ),\n+ }\n+ else:\n+ # calculate se\n+ eval_metric_ope_dict = {\n+ \"ipw\": (mock_policy_value + ipw.eps - ground_truth_policy_value) ** 2,\n+ \"ipw3\": (mock_policy_value + ipw3.eps - ground_truth_policy_value) ** 2,\n+ }\n+ # check performance estimators\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_continuous_bandit_feedback, ope_estimators=[ipw, ipw3]\n+ )\n+ performance = ope_.evaluate_performance_of_estimators(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ metric=metric,\n+ )\n+ for k, v in performance.items():\n+ assert k in eval_metric_ope_dict, \"Invalid key of performance response\"\n+ assert v == eval_metric_ope_dict[k], \"Invalid value of performance response\"\n+ performance_df = ope_.summarize_estimators_comparison(\n+ ground_truth_policy_value=ground_truth_policy_value,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ metric=metric,\n+ )\n+ assert_frame_equal(\n+ performance_df, pd.DataFrame(eval_metric_ope_dict, index=[metric]).T\n+ ), \"Invalid summarization (performance)\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests of meta_continuous |
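For orientation, the workflow these meta tests exercise looks roughly as follows when driven by a real estimator instead of the test mocks. This is a minimal sketch, not code from the commit itself: the `obp.ope` import path and the `KernelizedInverseProbabilityWeighting(kernel=..., bandwidth=...)` constructor are assumptions about the surrounding library, while the `ContinuousOffPolicyEvaluation` keyword arguments mirror the calls in the tests above and the data generation follows the fixture added in the next record.

```python
import numpy as np
from obp.dataset import SyntheticContinuousBanditDataset
from obp.ope import (  # assumed import path
    ContinuousOffPolicyEvaluation,
    KernelizedInverseProbabilityWeighting,  # assumed kernelized IPW estimator
)

# logged data generated the same way as the fixture added in the next record;
# at this point in the history the action range is still an argument of
# obtain_batch_bandit_feedback (a later commit below moves it to the constructor)
dataset = SyntheticContinuousBanditDataset(dim_context=2, random_state=12345)
bandit_feedback = dataset.obtain_batch_bandit_feedback(
    n_rounds=100, min_action_value=-10.0, max_action_value=10.0
)

ope = ContinuousOffPolicyEvaluation(
    bandit_feedback=bandit_feedback,
    ope_estimators=[KernelizedInverseProbabilityWeighting(kernel="gaussian", bandwidth=0.1)],
)
n_rounds = bandit_feedback["n_rounds"]
# deterministic continuous actions of the evaluation policy (dummy values here)
action_by_evaluation_policy = np.random.uniform(-10.0, 10.0, size=n_rounds)
# dummy regression-model predictions: 1-dimensional, same length as the actions
estimated_rewards_by_reg_model = np.zeros(n_rounds)

estimated_values = ope.estimate_policy_values(
    action_by_evaluation_policy=action_by_evaluation_policy,
    estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
value_df, interval_df = ope.summarize_off_policy_estimates(
    action_by_evaluation_policy=action_by_evaluation_policy,
    estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
    alpha=0.05,
    n_bootstrap_samples=100,
    random_state=12345,
)
```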
641,014 | 07.07.2021 10:19:34 | -32,400 | 6ee2fde20ad9ed66157aa9a441787a4129877772 | add synthetic_continuous_bandit_feedback | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -13,6 +13,7 @@ from obp.dataset import (\nlogistic_reward_function,\nlinear_behavior_policy,\nSyntheticSlateBanditDataset,\n+ SyntheticContinuousBanditDataset,\n)\nfrom obp.utils import sigmoid\n@@ -20,7 +21,7 @@ from obp.utils import sigmoid\nos.environ[\"PY_IGNORE_IMPORTMISMATCH\"] = \"1\"\n-# generate synthetic dataset using SyntheticBanditDataset\n+# generate synthetic bandit dataset using SyntheticBanditDataset\[email protected](scope=\"session\")\ndef synthetic_bandit_feedback() -> BanditFeedback:\nn_actions = 10\n@@ -38,7 +39,7 @@ def synthetic_bandit_feedback() -> BanditFeedback:\nreturn bandit_feedback\n-# generate synthetic slate dataset using SyntheticBanditDataset\n+# generate synthetic slate bandit dataset using SyntheticSlateBanditDataset\[email protected](scope=\"session\")\ndef synthetic_slate_bandit_feedback() -> BanditFeedback:\n# set parameters\n@@ -60,6 +61,28 @@ def synthetic_slate_bandit_feedback() -> BanditFeedback:\nreturn bandit_feedback\n+# generate synthetic continuous bandit dataset using SyntheticContinuousBanditDataset\[email protected](scope=\"session\")\n+def synthetic_continuous_bandit_feedback() -> BanditFeedback:\n+ # set parameters\n+ dim_context = 2\n+ random_state = 12345\n+ n_rounds = 100\n+ min_action_value = -10\n+ max_action_value = 10\n+ dataset = SyntheticContinuousBanditDataset(\n+ dim_context=dim_context,\n+ random_state=random_state,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+ return bandit_feedback\n+\n+\n# make the expected reward of synthetic bandit feedback close to that of the Open Bandit Dataset\[email protected](scope=\"session\")\ndef fixed_synthetic_bandit_feedback(synthetic_bandit_feedback) -> BanditFeedback:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add synthetic_continuous_bandit_feedback |
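Because the fixture is session-scoped, any test module under `tests/ope/` can request the generated feedback by parameter name. A hypothetical consumer, shown only to illustrate the pattern — the test name and assertions are invented, and the `obp.types.BanditFeedback` import path is assumed:

```python
# Hypothetical consumer of the session-scoped fixture defined in tests/ope/conftest.py.
from obp.types import BanditFeedback  # assumed import path for the type alias


def test_pscore_is_positive(synthetic_continuous_bandit_feedback: BanditFeedback) -> None:
    # pytest injects the dict returned by obtain_batch_bandit_feedback(n_rounds=100)
    assert (synthetic_continuous_bandit_feedback["pscore"] > 0).all()
    assert synthetic_continuous_bandit_feedback["context"].shape == (100, 2)
```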
641,014 | 07.07.2021 21:06:23 | -32,400 | 1e04f89055281813a3e3d82c338a879af666dd80 | add tests of ContinuousNNPolicyLearner | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/policy/test_offline_continuous.py",
"diff": "+import pytest\n+import numpy as np\n+import torch\n+\n+from obp.policy.offline_continuous import ContinuousNNPolicyLearner\n+\n+\n+# dim_context, pg_method, bandwidth, output_space, hidden_layer_size, activation, solver, alpha,\n+# batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum,\n+# early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, max_fun, q_func_estimator_hyperparams, description\n+invalid_input_of_nn_policy_learner_init = [\n+ (\n+ 0, #\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"dim_context must be a positive integer\",\n+ ),\n+ (\n+ 10,\n+ \"dm\", #\n+ 2,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"pg_method must be one of 'dgp', 'ipw', or 'dr-d'\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ -0.1, #\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"bandwidth must be a positive float\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (\"\", \"\"), #\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"output_space must be tuple of integers or floats\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, \"\"), #\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"hidden_layer_size must be tuple of positive integers\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"sigmoid\", #\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"activation must be one of 'identity', 'logistic', 'tanh', 'relu', or 'elu'\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adagrad\", #\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"solver must be one of 'adam', 'lbfgs', or 'sgd'\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ -1, #\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"alpha must be a non-negative float\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ 0, #\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"batch_size must be a positive integer or 'auto'\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0, #\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ 
\"learning_rate_init must be a positive float\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 0, #\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"max_iter must be a positive integer\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ None, #\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"shuffle must be a bool\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ \"\", #\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"random_state must be None or an integer\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ -1, #\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"tol must be a positive float\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 2, #\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"momentum must be a float in [0., 1.]\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ \"\", #\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"nesterovs_momentum must be a bool\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ None, #\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"early_stopping must be a bool\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"lbfgs\", #\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True, #\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"if early_stopping is True, solver must be one of 'sgd' or 'adam'\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 2, #\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"validation_fraction must be a float in\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 2, #\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"beta_1 must be a float in [0. 
1.]\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 2, #\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"beta_2 must be a float in [0., 1.]\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ -1, #\n+ 10,\n+ 15000,\n+ None,\n+ \"epsilon must be a non-negative float\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 0, #\n+ 15000,\n+ None,\n+ \"n_iter_no_change must be a positive integer\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 0, #\n+ None,\n+ \"max_fun must be a positive integer\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ \"\", #\n+ \"q_func_estimator_hyperparams must be a dict,\",\n+ ),\n+]\n+\n+valid_input_of_nn_policy_learner_init = [\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"valid input\",\n+ ),\n+ (\n+ 10,\n+ \"dpg\",\n+ None,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ None,\n+ \"valid input\",\n+ ),\n+ (\n+ 10,\n+ \"ipw\",\n+ 0.1,\n+ (-10, 10),\n+ (100, 50, 100),\n+ \"logistic\",\n+ \"sgd\",\n+ 0.001,\n+ 50,\n+ 0.0001,\n+ 200,\n+ True,\n+ None,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ 15000,\n+ {},\n+ \"valid input\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"dim_context, pg_method, bandwidth, output_space, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, max_fun, q_func_estimator_hyperparams, description\",\n+ invalid_input_of_nn_policy_learner_init,\n+)\n+def test_nn_policy_learner_init_using_invalid_inputs(\n+ dim_context,\n+ pg_method,\n+ bandwidth,\n+ output_space,\n+ hidden_layer_size,\n+ activation,\n+ solver,\n+ alpha,\n+ batch_size,\n+ learning_rate_init,\n+ max_iter,\n+ shuffle,\n+ random_state,\n+ tol,\n+ momentum,\n+ nesterovs_momentum,\n+ early_stopping,\n+ validation_fraction,\n+ beta_1,\n+ beta_2,\n+ epsilon,\n+ n_iter_no_change,\n+ max_fun,\n+ q_func_estimator_hyperparams,\n+ description,\n+):\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = ContinuousNNPolicyLearner(\n+ dim_context=dim_context,\n+ pg_method=pg_method,\n+ bandwidth=bandwidth,\n+ output_space=output_space,\n+ hidden_layer_size=hidden_layer_size,\n+ activation=activation,\n+ solver=solver,\n+ alpha=alpha,\n+ 
batch_size=batch_size,\n+ learning_rate_init=learning_rate_init,\n+ max_iter=max_iter,\n+ shuffle=shuffle,\n+ random_state=random_state,\n+ tol=tol,\n+ momentum=momentum,\n+ nesterovs_momentum=nesterovs_momentum,\n+ early_stopping=early_stopping,\n+ validation_fraction=validation_fraction,\n+ beta_1=beta_1,\n+ beta_2=beta_2,\n+ epsilon=epsilon,\n+ n_iter_no_change=n_iter_no_change,\n+ max_fun=max_fun,\n+ q_func_estimator_hyperparams=q_func_estimator_hyperparams,\n+ )\n+\n+\[email protected](\n+ \"dim_context, pg_method, bandwidth, output_space, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, max_fun, q_func_estimator_hyperparams, description\",\n+ valid_input_of_nn_policy_learner_init,\n+)\n+def test_nn_policy_learner_init_using_valid_inputs(\n+ dim_context,\n+ pg_method,\n+ bandwidth,\n+ output_space,\n+ hidden_layer_size,\n+ activation,\n+ solver,\n+ alpha,\n+ batch_size,\n+ learning_rate_init,\n+ max_iter,\n+ shuffle,\n+ random_state,\n+ tol,\n+ momentum,\n+ nesterovs_momentum,\n+ early_stopping,\n+ validation_fraction,\n+ beta_1,\n+ beta_2,\n+ epsilon,\n+ n_iter_no_change,\n+ max_fun,\n+ q_func_estimator_hyperparams,\n+ description,\n+):\n+ nn_policy_learner = ContinuousNNPolicyLearner(\n+ dim_context=dim_context,\n+ pg_method=pg_method,\n+ bandwidth=bandwidth,\n+ output_space=output_space,\n+ hidden_layer_size=hidden_layer_size,\n+ activation=activation,\n+ solver=solver,\n+ alpha=alpha,\n+ batch_size=batch_size,\n+ learning_rate_init=learning_rate_init,\n+ max_iter=max_iter,\n+ shuffle=shuffle,\n+ random_state=random_state,\n+ tol=tol,\n+ momentum=momentum,\n+ nesterovs_momentum=nesterovs_momentum,\n+ early_stopping=early_stopping,\n+ validation_fraction=validation_fraction,\n+ beta_1=beta_1,\n+ beta_2=beta_2,\n+ epsilon=epsilon,\n+ n_iter_no_change=n_iter_no_change,\n+ max_fun=max_fun,\n+ q_func_estimator_hyperparams=q_func_estimator_hyperparams,\n+ )\n+ assert isinstance(nn_policy_learner, ContinuousNNPolicyLearner)\n+\n+\n+def test_nn_policy_learner_create_train_data_for_opl():\n+ context = np.ones((100, 2), dtype=np.int32)\n+ action_by_behavior_policy = np.zeros(100, dtype=np.int32)\n+ reward = np.ones((100,), dtype=np.float32)\n+ pscore = np.array([0.5] * 100, dtype=np.float32)\n+\n+ learner1 = ContinuousNNPolicyLearner(dim_context=2, pg_method=\"dpg\")\n+ training_loader, validation_loader = learner1._create_train_data_for_opl(\n+ context=context,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+ assert isinstance(training_loader, torch.utils.data.DataLoader)\n+ assert validation_loader is None\n+\n+ learner2 = ContinuousNNPolicyLearner(\n+ dim_context=2,\n+ pg_method=\"dpg\",\n+ early_stopping=True,\n+ )\n+\n+ training_loader, validation_loader = learner2._create_train_data_for_opl(\n+ context=context,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+ assert isinstance(training_loader, torch.utils.data.DataLoader)\n+ assert isinstance(validation_loader, torch.utils.data.DataLoader)\n+\n+\n+# context, action_by_behavior_policy, reward, pscore, description\n+invalid_input_of_nn_policy_learner_fit = [\n+ (\n+ 5, #\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"context must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones(5), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"context 
must be 2-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ 5, #\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"action_by_behavior_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones((5, 2)), #\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"action_by_behavior_policy must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ 5, #\n+ np.ones(5) * 0.5,\n+ \"reward must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones((5, 2)), #\n+ np.ones(5) * 0.5,\n+ \"reward must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones(5),\n+ 0.5, #\n+ \"pscore must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones((5, 2)) * 0.5, #\n+ \"pscore must be 1-dimensional ndarray\",\n+ ),\n+ (\n+ np.ones((4, 2)), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"context, action_by_behavior_policy, reward, and pscore must be the same size\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(4), #\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"context, action_by_behavior_policy, reward, and pscore must be the same size\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones(4), #\n+ np.ones(5) * 0.5,\n+ \"context, action_by_behavior_policy, reward, and pscore must be the same size\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones(5),\n+ np.arange(5) * 0.1, #\n+ \"pscore must be positive\",\n+ ),\n+ (\n+ np.ones((5, 3)), #\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"the second dimension of context must be equal to dim_context\",\n+ ),\n+]\n+\n+valid_input_of_nn_policy_learner_fit = [\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones(5),\n+ np.ones(5) * 0.5,\n+ \"valid input (pscore is given)\",\n+ ),\n+ (\n+ np.ones((5, 2)),\n+ np.ones(5),\n+ np.ones(5),\n+ None,\n+ \"valid input (pscore is not given)\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"context, action_by_behavior_policy, reward, pscore, description\",\n+ invalid_input_of_nn_policy_learner_fit,\n+)\n+def test_nn_policy_learner_fit_using_invalid_inputs(\n+ context,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ description,\n+):\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ # set parameters\n+ dim_context = 2\n+ pg_method = \"dpg\"\n+ learner = ContinuousNNPolicyLearner(\n+ dim_context=dim_context, pg_method=pg_method\n+ )\n+ learner.fit(\n+ context=context,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+\[email protected](\n+ \"context, action_by_behavior_policy, reward, pscore, description\",\n+ valid_input_of_nn_policy_learner_fit,\n+)\n+def test_nn_policy_learner_fit_using_valid_inputs(\n+ context,\n+ action_by_behavior_policy,\n+ reward,\n+ pscore,\n+ description,\n+):\n+ # set parameters\n+ dim_context = 2\n+ pg_method = \"dpg\"\n+ learner = ContinuousNNPolicyLearner(dim_context=dim_context, pg_method=pg_method)\n+ learner.fit(\n+ context=context,\n+ action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ pscore=pscore,\n+ )\n+\n+\n+def test_nn_policy_learner_predict():\n+ # synthetic data\n+ context = np.ones((5, 2))\n+ action_by_behavior_policy = np.ones(5)\n+ reward = np.ones(5)\n+\n+ # set parameters\n+ dim_context = 2\n+ pg_method = \"dpg\"\n+ output_space = (-10, 10)\n+ learner = ContinuousNNPolicyLearner(\n+ dim_context=dim_context, pg_method=pg_method, output_space=output_space\n+ )\n+ learner.fit(\n+ context=context,\n+ 
action_by_behavior_policy=action_by_behavior_policy,\n+ reward=reward,\n+ )\n+\n+ # shape error\n+ with pytest.raises(ValueError, match=\"context must be 2-dimensional ndarray\"):\n+ learner.predict(context=np.ones(5))\n+\n+ with pytest.raises(ValueError, match=\"context must be 2-dimensional ndarray\"):\n+ learner.predict(context=\"np.ones(5)\")\n+\n+ # inconsistency between dim_context and context\n+ with pytest.raises(\n+ ValueError, match=\"the second dimension of context must be equal to dim_context\"\n+ ):\n+ learner.predict(context=np.ones((5, 3)))\n+\n+ # check output shape\n+ predicted_actions = learner.predict(context=context)\n+ assert predicted_actions.shape[0] == context.shape[0]\n+ assert predicted_actions.ndim == 1\n+ assert np.all(output_space[0] <= predicted_actions) or np.all(\n+ predicted_actions <= output_space[1]\n+ )\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/policy/test_offline_learner_continuous_performance.py",
"diff": "+from typing import Tuple, Union, Optional\n+\n+import numpy as np\n+from joblib import Parallel, delayed\n+import pytest\n+from dataclasses import dataclass\n+from obp.dataset import (\n+ SyntheticContinuousBanditDataset,\n+ linear_reward_funcion_continuous,\n+ linear_behavior_policy_continuous,\n+)\n+from obp.policy import BaseContinuousOfflinePolicyLearner, ContinuousNNPolicyLearner\n+\n+\n+# n_rounds, dim_context, action_noise, reward_noise, min_action_value, max_action_value, pg_method, bandwidth\n+offline_experiment_configurations = [\n+ (\n+ 1500,\n+ 10,\n+ 1.0,\n+ 1.0,\n+ -10.0,\n+ 10.0,\n+ \"dpg\",\n+ None,\n+ ),\n+ (\n+ 2000,\n+ 5,\n+ 1.0,\n+ 1.0,\n+ 0.0,\n+ 100.0,\n+ \"dpg\",\n+ None,\n+ ),\n+]\n+\n+\n+@dataclass\n+class RandomPolicy(BaseContinuousOfflinePolicyLearner):\n+ output_space: Tuple[Union[int, float], Union[int, float]] = None\n+\n+ def fit(self):\n+ raise NotImplementedError\n+\n+ def predict(self, context: np.ndarray) -> np.ndarray:\n+\n+ n_rounds = context.shape[0]\n+ predicted_actions = np.random.uniform(\n+ self.output_space[0], self.output_space[1], size=n_rounds\n+ )\n+ return predicted_actions\n+\n+\[email protected](\n+ \"n_rounds, dim_context, action_noise, reward_noise, min_action_value, max_action_value, pg_method, bandwidth\",\n+ offline_experiment_configurations,\n+)\n+def test_offline_nn_policy_learner_performance(\n+ n_rounds: int,\n+ dim_context: int,\n+ action_noise: float,\n+ reward_noise: float,\n+ min_action_value: float,\n+ max_action_value: float,\n+ pg_method: str,\n+ bandwidth: Optional[float],\n+) -> None:\n+ def process(i: int):\n+ # synthetic data generator\n+ dataset = SyntheticContinuousBanditDataset(\n+ dim_context=dim_context,\n+ reward_function=linear_reward_funcion_continuous,\n+ behavior_policy_function=linear_behavior_policy_continuous,\n+ random_state=i,\n+ )\n+ # define evaluation policy using NNPolicyLearner\n+ nn_policy = ContinuousNNPolicyLearner(\n+ dim_context=dim_context,\n+ pg_method=pg_method,\n+ bandwidth=bandwidth,\n+ output_space=(min_action_value, max_action_value),\n+ hidden_layer_size=(10, 10),\n+ learning_rate_init=0.001,\n+ solver=\"sgd\",\n+ )\n+ # baseline method 1. 
RandomPolicy\n+ random_policy = RandomPolicy(output_space=(min_action_value, max_action_value))\n+ # sample new training and test sets of synthetic logged bandit feedback\n+ bandit_feedback_train = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ action_noise=action_noise,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+ bandit_feedback_test = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ action_noise=action_noise,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\n+ )\n+ # train the evaluation policy on the training set of the synthetic logged bandit feedback\n+ nn_policy.fit(\n+ context=bandit_feedback_train[\"context\"],\n+ action_by_behavior_policy=bandit_feedback_train[\"action\"],\n+ reward=bandit_feedback_train[\"reward\"],\n+ pscore=bandit_feedback_train[\"pscore\"],\n+ )\n+ # predict the action decisions for the test set of the synthetic logged bandit feedback\n+ actions_predicted_by_nn_policy = nn_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ actions_predicted_by_random = random_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ # get the ground truth policy value for each learner\n+ gt_nn_policy_learner = dataset.calc_ground_truth_policy_value(\n+ context=bandit_feedback_test[\"context\"],\n+ action=actions_predicted_by_nn_policy,\n+ )\n+ gt_random_policy = dataset.calc_ground_truth_policy_value(\n+ context=bandit_feedback_test[\"context\"],\n+ action=actions_predicted_by_random,\n+ )\n+\n+ return gt_nn_policy_learner, gt_random_policy\n+\n+ n_runs = 10\n+ processed = Parallel(\n+ n_jobs=1, # PyTorch uses multiple threads\n+ verbose=0,\n+ )([delayed(process)(i) for i in np.arange(n_runs)])\n+ list_gt_nn_policy, list_gt_random = [], []\n+ for i, ground_truth_policy_values in enumerate(processed):\n+ gt_nn_policy, gt_random = ground_truth_policy_values\n+ list_gt_nn_policy.append(gt_nn_policy)\n+ list_gt_random.append(gt_random)\n+\n+ assert np.mean(list_gt_nn_policy) > np.mean(list_gt_random)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline_learner_performance.py",
"new_path": "tests/policy/test_offline_learner_performance.py",
"diff": "+from typing import Optional\n+from typing import Tuple\n+\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn.experimental import enable_hist_gradient_boosting # noqa\n@@ -8,9 +11,6 @@ from dataclasses import dataclass\nfrom obp.policy.base import BaseOfflinePolicyLearner\nfrom sklearn.base import clone, ClassifierMixin, is_classifier\n-from typing import Optional\n-from typing import Tuple\n-\nfrom obp.dataset import (\nSyntheticBanditDataset,\nlinear_behavior_policy,\n@@ -48,6 +48,7 @@ base_model_dict = dict(\nrandom_forest=RandomForestClassifier,\n)\n+# n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\noffline_experiment_configurations = [\n(\n600,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests of ContinuousNNPolicyLearner |
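A condensed sketch of how the learner under test is used end to end, mirroring the settings of the performance test above. The data are random placeholders, and `max_iter` is shrunk only to keep the smoke run short (it is a valid constructor argument per the parametrized cases); everything else follows the calls shown in the diffs.

```python
import numpy as np
from obp.policy import ContinuousNNPolicyLearner  # import path as in the performance test

dim_context, n_rounds = 5, 1000
rng = np.random.default_rng(12345)
context = rng.normal(size=(n_rounds, dim_context))
action_by_behavior_policy = rng.uniform(-10.0, 10.0, size=n_rounds)
reward = rng.normal(size=n_rounds)

learner = ContinuousNNPolicyLearner(
    dim_context=dim_context,
    pg_method="dpg",              # "dpg" needs no bandwidth in the cases above
    output_space=(-10.0, 10.0),
    hidden_layer_size=(10, 10),
    solver="sgd",
    learning_rate_init=0.001,
    max_iter=50,                  # kept small for a quick run; default is 200
)
learner.fit(
    context=context,
    action_by_behavior_policy=action_by_behavior_policy,
    reward=reward,                # pscore is optional, as the valid-input cases show
)
predicted_actions = learner.predict(context=context)
assert predicted_actions.shape == (n_rounds,)  # one continuous action per round
```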
641,014 | 07.07.2021 21:40:50 | -32,400 | d92a4762e40ec08e04374770f7515241e5e92fff | add some check functions | [
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -331,6 +331,158 @@ def check_ope_inputs(\nraise ValueError(\"pscore must be positive\")\n+def check_continuous_bandit_feedback_inputs(\n+ context: np.ndarray,\n+ action_by_behavior_policy: np.ndarray,\n+ reward: np.ndarray,\n+ expected_reward: Optional[np.ndarray] = None,\n+ pscore: Optional[np.ndarray] = None,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for bandit learning or simulation with continuous actions.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors in each round, i.e., :math:`x_t`.\n+\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ expected_reward: array-like, shape (n_rounds, n_actions), default=None\n+ Expected rewards (or outcome) in each round, i.e., :math:`\\\\mathbb{E}[r_t]`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ \"\"\"\n+ if not isinstance(context, np.ndarray) or context.ndim != 2:\n+ raise ValueError(\"context must be 2-dimensional ndarray\")\n+ if (\n+ not isinstance(action_by_behavior_policy, np.ndarray)\n+ or action_by_behavior_policy.ndim != 1\n+ ):\n+ raise ValueError(\"action_by_behavior_policy must be 1-dimensional ndarray\")\n+ if not isinstance(reward, np.ndarray) or reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional ndarray\")\n+\n+ if expected_reward is not None:\n+ if not isinstance(expected_reward, np.ndarray) or expected_reward.ndim != 1:\n+ raise ValueError(\"expected_reward must be 1-dimensional ndarray\")\n+ if not (\n+ context.shape[0]\n+ == action_by_behavior_policy.shape[0]\n+ == reward.shape[0]\n+ == expected_reward.shape[0]\n+ ):\n+ raise ValueError(\n+ \"context, action_by_behavior_policy, reward, and expected_reward must be the same size.\"\n+ )\n+ if pscore is not None:\n+ if not isinstance(pscore, np.ndarray) or pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional ndarray\")\n+ if not (\n+ context.shape[0]\n+ == action_by_behavior_policy.shape[0]\n+ == reward.shape[0]\n+ == pscore.shape[0]\n+ ):\n+ raise ValueError(\n+ \"context, action_by_behavior_policy, reward, and pscore must be the same size.\"\n+ )\n+ if np.any(pscore <= 0):\n+ raise ValueError(\"pscore must be positive\")\n+\n+\n+def check_continuous_ope_inputs(\n+ action_by_evaluation_policy: np.ndarray,\n+ action_by_behavior_policy: Optional[np.ndarray] = None,\n+ reward: Optional[np.ndarray] = None,\n+ pscore: Optional[np.ndarray] = None,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+) -> Optional[ValueError]:\n+ \"\"\"Check inputs for OPE with continuous actions.\n+\n+ Parameters\n+ -----------\n+ action_by_behavior_policy: array-like, shape (n_rounds,)\n+ Continuous action values sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ action_by_evaluation_policy: array-like, shape (n_rounds,), default=None\n+ Continuous action values given by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(x_t)`.\n+\n+ reward: array-like, shape (n_rounds,), default=None\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,), 
default=None\n+ Probability densities of the continuous action values sampled by a behavior policy\n+ (generalized propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds,), default=None\n+ Expected rewards given context and action estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ \"\"\"\n+ # action_by_evaluation_policy\n+ if (\n+ not isinstance(action_by_evaluation_policy, np.ndarray)\n+ or action_by_evaluation_policy.ndim != 1\n+ ):\n+ raise ValueError(\"action_by_evaluation_policy must be 1-dimensional ndarray\")\n+\n+ # estimated_rewards_by_reg_model\n+ if estimated_rewards_by_reg_model is not None:\n+ if (\n+ not isinstance(estimated_rewards_by_reg_model, np.ndarray)\n+ or estimated_rewards_by_reg_model.ndim != 1\n+ ):\n+ raise ValueError(\n+ \"estimated_rewards_by_reg_model must be 1-dimensional ndarray\"\n+ )\n+ if (\n+ estimated_rewards_by_reg_model.shape[0]\n+ != action_by_evaluation_policy.shape[0]\n+ ):\n+ raise ValueError(\n+ \"estimated_rewards_by_reg_model and action_by_evaluation_policy must be the same size\"\n+ )\n+\n+ # action, reward\n+ if action_by_behavior_policy is not None or reward is not None:\n+ if (\n+ not isinstance(action_by_behavior_policy, np.ndarray)\n+ or action_by_behavior_policy.ndim != 1\n+ ):\n+ raise ValueError(\"action_by_behavior_policy must be 1-dimensional ndarray\")\n+ if not isinstance(reward, np.ndarray) or reward.ndim != 1:\n+ raise ValueError(\"reward must be 1-dimensional ndarray\")\n+ if not (action_by_behavior_policy.shape[0] == reward.shape[0]):\n+ raise ValueError(\n+ \"action_by_behavior_policy and reward must be the same size\"\n+ )\n+ if not (\n+ action_by_behavior_policy.shape[0] == action_by_evaluation_policy.shape[0]\n+ ):\n+ raise ValueError(\n+ \"action_by_behavior_policy and action_by_evaluation_policy must be the same size\"\n+ )\n+\n+ # pscore\n+ if pscore is not None:\n+ if not isinstance(pscore, np.ndarray) or pscore.ndim != 1:\n+ raise ValueError(\"pscore must be 1-dimensional ndarray\")\n+ if not (\n+ action_by_behavior_policy.shape[0] == reward.shape[0] == pscore.shape[0]\n+ ):\n+ raise ValueError(\n+ \"action_by_behavior_policy, reward, and pscore must be the same size\"\n+ )\n+ if np.any(pscore <= 0):\n+ raise ValueError(\"pscore must be positive\")\n+\n+\ndef _check_slate_ope_inputs(\nslate_id: np.ndarray,\nreward: np.ndarray,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some check functions |
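To make the validation contract of the new helpers concrete, the following sketch calls them once with consistent arrays and once with a deliberately mismatched evaluation-policy action array. The array values are dummies; the import path simply follows the module the diff touches.

```python
import numpy as np
from obp.utils import (
    check_continuous_bandit_feedback_inputs,
    check_continuous_ope_inputs,
)

n_rounds = 5
context = np.ones((n_rounds, 3))
action = np.random.uniform(size=n_rounds)
reward = np.random.normal(size=n_rounds)
pscore = np.full(n_rounds, 0.5)

# passes silently: all arrays agree in length and pscore is strictly positive
check_continuous_bandit_feedback_inputs(
    context=context,
    action_by_behavior_policy=action,
    reward=reward,
    pscore=pscore,
)

# raises: the evaluation-policy actions differ in length from the logged actions
try:
    check_continuous_ope_inputs(
        action_by_evaluation_policy=np.random.uniform(size=n_rounds + 1),
        action_by_behavior_policy=action,
        reward=reward,
        pscore=pscore,
    )
except ValueError as err:
    print(err)  # "action_by_behavior_policy and action_by_evaluation_policy must be the same size"
```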
641,014 | 08.07.2021 10:39:27 | -32,400 | 8f5884ba6166f8e5b2e8615438b50a6e6e95d600 | move some arguments to init | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_continuous.py",
"new_path": "obp/dataset/synthetic_continuous.py",
"diff": "@@ -28,6 +28,18 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\ndim_context: int, default=1\nNumber of dimensions of context vectors.\n+ action_noise: float, default=1.0\n+ Standard deviation of the Gaussian noise on the continuous action value.\n+\n+ reward_noise: float, default=1.0\n+ Standard deviation of the Gaussian noise on the reward.\n+\n+ min_action_value: float, default=-np.inf\n+ A minimum possible continuous action value.\n+\n+ max_action_value: float, default=np.inf\n+ A maximum possible continuous action value.\n+\nreward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\nFunction generating expected reward for each given action-context pair,\ni.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n@@ -58,13 +70,13 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\n>>> dataset = SyntheticContinuousBanditDataset(\ndim_context=5,\n+ min_action_value=1,\n+ max_action_value=10,\nreward_function=linear_reward_funcion_continuous,\nbehavior_policy_function=linear_behavior_policy_continuous,\nrandom_state=12345,\n)\n- >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(\n- n_rounds=10000, min_action_value=1, max_action_value=10,\n- )\n+ >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)\n>>> bandit_feedback\n@@ -91,6 +103,10 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\n\"\"\"\ndim_context: int = 1\n+ action_noise: float = 1.0\n+ reward_noise: float = 1.0\n+ min_action_value: float = -np.inf\n+ max_action_value: float = np.inf\nreward_function: Optional[\nCallable[\n[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray], np.ndarray\n@@ -105,6 +121,22 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\ncheck_scalar(self.dim_context, name=\"dim_context\", target_type=int, min_val=1)\n+ check_scalar(\n+ self.action_noise, name=\"action_noise\", target_type=(int, float), min_val=0\n+ )\n+ check_scalar(\n+ self.reward_noise, name=\"reward_noise\", target_type=(int, float), min_val=0\n+ )\n+ check_scalar(\n+ self.min_action_value, name=\"min_action_value\", target_type=(int, float)\n+ )\n+ check_scalar(\n+ self.max_action_value, name=\"max_action_value\", target_type=(int, float)\n+ )\n+ if self.max_action_value <= self.min_action_value:\n+ raise ValueError(\n+ \"`max_action_value` must be larger than `min_action_value`\"\n+ )\nif self.random_state is None:\nraise ValueError(\"random_state must be given\")\nself.random_ = check_random_state(self.random_state)\n@@ -116,10 +148,6 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\ndef obtain_batch_bandit_feedback(\nself,\nn_rounds: int,\n- action_noise: float = 1.0,\n- reward_noise: float = 1.0,\n- min_action_value: float = -np.inf,\n- max_action_value: float = np.inf,\n) -> BanditFeedback:\n\"\"\"Obtain batch logged bandit feedback.\n@@ -128,18 +156,6 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\nn_rounds: int\nNumber of rounds for synthetic bandit feedback data.\n- action_noise: float, default=1.0\n- Standard deviation of the Gaussian noise on the continuous action value.\n-\n- reward_noise: float, default=1.0\n- Standard deviation of the Gaussian noise on the reward.\n-\n- min_action_value: float, default=-np.inf\n- A minimum possible continuous action value.\n-\n- max_action_value: float, default=np.inf\n- A maximum possible continuous action value.\n-\nReturns\n---------\nbandit_feedback: 
BanditFeedback\n@@ -147,22 +163,6 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\n\"\"\"\ncheck_scalar(n_rounds, name=\"n_rounds\", target_type=int, min_val=1)\n- check_scalar(\n- action_noise, name=\"action_noise\", target_type=(int, float), min_val=0\n- )\n- check_scalar(\n- reward_noise, name=\"reward_noise\", target_type=(int, float), min_val=0\n- )\n- check_scalar(\n- min_action_value, name=\"min_action_value\", target_type=(int, float)\n- )\n- check_scalar(\n- max_action_value, name=\"max_action_value\", target_type=(int, float)\n- )\n- if max_action_value <= min_action_value:\n- raise ValueError(\n- \"`max_action_value` must be larger than `min_action_value`\"\n- )\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\n@@ -171,29 +171,29 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\ncontext=context,\nrandom_state=self.random_state,\n)\n- a = (min_action_value - expected_action_values) / action_noise\n- b = (max_action_value - expected_action_values) / action_noise\n+ a = (self.min_action_value - expected_action_values) / self.action_noise\n+ b = (self.max_action_value - expected_action_values) / self.action_noise\naction = truncnorm.rvs(\na,\nb,\nloc=expected_action_values,\n- scale=action_noise,\n+ scale=self.action_noise,\nrandom_state=self.random_state,\n)\npscore = truncnorm.pdf(\n- action, a, b, loc=expected_action_values, scale=action_noise\n+ action, a, b, loc=expected_action_values, scale=self.action_noise\n)\nelse:\naction = uniform.rvs(\n- loc=min_action_value,\n- scale=(max_action_value - min_action_value),\n+ loc=self.min_action_value,\n+ scale=(self.max_action_value - self.min_action_value),\nsize=n_rounds,\nrandom_state=self.random_state,\n)\npscore = uniform.pdf(\naction,\n- loc=min_action_value,\n- scale=(max_action_value - min_action_value),\n+ loc=self.min_action_value,\n+ scale=(self.max_action_value - self.min_action_value),\n)\nif self.reward_function is None:\n@@ -203,7 +203,7 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\ncontext=context, action=action, random_state=self.random_state\n)\nreward = expected_reward_ + self.random_.normal(\n- scale=reward_noise, size=n_rounds\n+ scale=self.reward_noise, size=n_rounds\n)\nreturn dict(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | move some arguments to init |
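A sketch of the call pattern after this change, with the sampling knobs now fixed at construction time; it follows the updated docstring example in the diff. The reward-function name really is spelled `funcion` in the library at this commit, so it is kept verbatim.

```python
from obp.dataset import (
    SyntheticContinuousBanditDataset,
    linear_reward_funcion_continuous,     # spelling as in the library at this commit
    linear_behavior_policy_continuous,
)

dataset = SyntheticContinuousBanditDataset(
    dim_context=5,
    action_noise=1.0,        # moved here from obtain_batch_bandit_feedback
    reward_noise=1.0,        # moved here from obtain_batch_bandit_feedback
    min_action_value=1.0,    # moved here from obtain_batch_bandit_feedback
    max_action_value=10.0,   # moved here from obtain_batch_bandit_feedback
    reward_function=linear_reward_funcion_continuous,
    behavior_policy_function=linear_behavior_policy_continuous,
    random_state=12345,
)
# the sampling method now takes only the number of rounds
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
```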
641,014 | 08.07.2021 10:39:44 | -32,400 | 84e74c7e0de9c8c6d86b246ad75e9f3f305f705f | fix tests of SyntheticContinuousBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_continuous.py",
"new_path": "tests/dataset/test_synthetic_continuous.py",
"diff": "@@ -12,35 +12,17 @@ from obp.dataset.synthetic_continuous import (\n)\n-def test_synthetic_continuous_init():\n- # dim_context\n- with pytest.raises(ValueError):\n- SyntheticContinuousBanditDataset(dim_context=0)\n-\n- with pytest.raises(TypeError):\n- SyntheticContinuousBanditDataset(dim_context=\"2\")\n-\n- with pytest.raises(TypeError):\n- SyntheticContinuousBanditDataset(dim_context=None)\n-\n- # random_state\n- with pytest.raises(ValueError):\n- SyntheticContinuousBanditDataset(random_state=None)\n-\n- with pytest.raises(ValueError):\n- SyntheticContinuousBanditDataset(random_state=\"3\")\n-\n-\n-# n_rounds, action_noise, reward_noise, min_action_value, max_action_value, err, description\n-invalid_input_of_obtain_batch_bandit_feedback = [\n+# dim_context, action_noise, reward_noise, min_action_value, max_action_value, random_state, err, description\n+invalid_input_of_init = [\n(\n0, #\n1.0,\n1.0,\n-1.0,\n1.0,\n+ 12345,\nValueError,\n- \"`n_rounds`= 0, must be >= 1.\",\n+ \"`dim_context`= 0, must be >= 1.\",\n),\n(\n1.0, #\n@@ -48,8 +30,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`n_rounds` must be an instance of <class 'int'>, not <class 'float'>.\",\n+ \"`dim_context` must be an instance of <class 'int'>, not <class 'float'>.\",\n),\n(\n\"3\", #\n@@ -57,8 +40,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`n_rounds` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ \"`dim_context` must be an instance of <class 'int'>, not <class 'str'>.\",\n),\n(\nNone, #\n@@ -66,8 +50,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`n_rounds` must be an instance of <class 'int'>, not <class 'NoneType'>.\",\n+ \"`dim_context` must be an instance of <class 'int'>, not <class 'NoneType'>.\",\n),\n(\n3,\n@@ -75,6 +60,7 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n-1.0,\n1.0,\n+ 12345,\nValueError,\n\"`action_noise`= -1.0, must be >= 0.\",\n),\n@@ -84,8 +70,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`action_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ r\"`action_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n),\n(\n3,\n@@ -93,8 +80,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`action_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ r\"`action_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n),\n(\n3,\n@@ -102,6 +90,7 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n-1.0, #\n-1.0,\n1.0,\n+ 12345,\nValueError,\n\"`reward_noise`= -1.0, must be >= 0.\",\n),\n@@ -111,8 +100,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n\"3\", #\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`reward_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ r\"`reward_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n),\n(\n3,\n@@ -120,8 +110,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\nNone, #\n-1.0,\n1.0,\n+ 12345,\nTypeError,\n- \"`reward_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ r\"`reward_noise` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 
'NoneType'>.\",\n),\n(\n3,\n@@ -129,8 +120,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n\"3\", #\n1.0,\n+ 12345,\nTypeError,\n- \"`min_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ r\"`min_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n),\n(\n3,\n@@ -138,8 +130,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\nNone, #\n1.0,\n+ 12345,\nTypeError,\n- \"`min_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ r\"`min_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n),\n(\n3,\n@@ -147,8 +140,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n1.0,\n\"3\", #\n+ 12345,\nTypeError,\n- \"`max_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ r\"`max_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n),\n(\n3,\n@@ -156,8 +150,9 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n1.0,\nNone, #\n+ 12345,\nTypeError,\n- \"`max_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ r\"`max_action_value` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n),\n(\n3,\n@@ -165,35 +160,96 @@ invalid_input_of_obtain_batch_bandit_feedback = [\n1.0,\n1.0, #\n-1.0, #\n+ 12345,\nValueError,\n\"`max_action_value` must be larger than `min_action_value`\",\n),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ None,\n+ ValueError,\n+ \"random_state must be given\",\n+ ),\n+ (\n+ 3,\n+ 1.0,\n+ 1.0,\n+ -1.0,\n+ 1.0,\n+ \"\",\n+ ValueError,\n+ \"'' cannot be used to seed a numpy.random.RandomState instance\",\n+ ),\n]\[email protected](\n- \"n_rounds, action_noise, reward_noise, min_action_value, max_action_value, err, description\",\n- invalid_input_of_obtain_batch_bandit_feedback,\n+ \"dim_context, action_noise, reward_noise, min_action_value, max_action_value, random_state, err, description\",\n+ invalid_input_of_init,\n)\n-def test_synthetic_continuous_obtain_batch_bandit_feedback_using_invalid_inputs(\n- n_rounds,\n+def test_synthetic_continuous_init_using_invalid_inputs(\n+ dim_context,\naction_noise,\nreward_noise,\nmin_action_value,\nmax_action_value,\n+ random_state,\nerr,\ndescription,\n):\n- dataset = SyntheticContinuousBanditDataset()\n-\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = dataset.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds,\n+ _ = SyntheticContinuousBanditDataset(\n+ dim_context=dim_context,\naction_noise=action_noise,\nreward_noise=reward_noise,\nmin_action_value=min_action_value,\nmax_action_value=max_action_value,\n+ random_state=random_state,\n+ )\n+\n+\n+# n_rounds, err, description\n+invalid_input_of_obtain_batch_bandit_feedback = [\n+ (\n+ 0, #\n+ ValueError,\n+ \"`n_rounds`= 0, must be >= 1.\",\n+ ),\n+ (\n+ 1.0, #\n+ TypeError,\n+ \"`n_rounds` must be an instance of <class 'int'>, not <class 'float'>.\",\n+ ),\n+ (\n+ \"3\", #\n+ TypeError,\n+ \"`n_rounds` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ ),\n+ (\n+ None, #\n+ TypeError,\n+ \"`n_rounds` must be an instance of <class 'int'>, not <class 'NoneType'>.\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_rounds, err, description\",\n+ invalid_input_of_obtain_batch_bandit_feedback,\n)\n+def test_synthetic_continuous_obtain_batch_bandit_feedback_using_invalid_inputs(\n+ 
n_rounds,\n+ err,\n+ description,\n+):\n+ dataset = SyntheticContinuousBanditDataset()\n+\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\ndef test_synthetic_continuous_obtain_batch_bandit_feedback():\n@@ -201,12 +257,13 @@ def test_synthetic_continuous_obtain_batch_bandit_feedback():\nn_rounds = 10\nmin_action_value = -1.0\nmax_action_value = 1.0\n- dataset = SyntheticContinuousBanditDataset()\n- bandit_feedback = dataset.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds,\n+ dataset = SyntheticContinuousBanditDataset(\nmin_action_value=min_action_value,\nmax_action_value=max_action_value,\n)\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ )\nassert bandit_feedback[\"n_rounds\"] == n_rounds\nassert (\nbandit_feedback[\"context\"].shape[0] == n_rounds # n_rounds\n@@ -279,12 +336,14 @@ def test_synthetic_continuous_calc_policy_value_using_invalid_inputs(\ndef test_synthetic_continuous_calc_policy_value():\nn_rounds = 10\ndim_context = 3\n- dataset = SyntheticContinuousBanditDataset(dim_context=dim_context)\n- bandit_feedback = dataset.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds,\n+ dataset = SyntheticContinuousBanditDataset(\n+ dim_context=dim_context,\nmin_action_value=1,\nmax_action_value=10,\n)\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds,\n+ )\npolicy_value = dataset.calc_ground_truth_policy_value(\ncontext=bandit_feedback[\"context\"],\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix tests of SyntheticContinuousBanditDataset |
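The expected error strings in the test parametrizations above (for example "`n_rounds` must be an instance of <class 'int'>, not <class 'float'>.") follow the style emitted by scikit-learn's `check_scalar`, which the weight-clipping commit further below also imports. A minimal sketch of how such input validation is presumably wired into `obtain_batch_bandit_feedback` (simplified helper for illustration, not the repository's actual method body):

```python
from sklearn.utils import check_scalar


def _check_n_rounds(n_rounds) -> None:
    # Raises TypeError for non-int input and ValueError for values < 1,
    # producing messages in the style the tests above match against.
    check_scalar(n_rounds, name="n_rounds", target_type=int, min_val=1)
```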
641,014 | 08.07.2021 11:04:49 | -32,400 | 0582f84369024a87e189e95cf044d2cd45b8f4b0 | fix some tests to adjust the changes of SyntheticContinuousBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -72,14 +72,12 @@ def synthetic_continuous_bandit_feedback() -> BanditFeedback:\nmax_action_value = 10\ndataset = SyntheticContinuousBanditDataset(\ndim_context=dim_context,\n- random_state=random_state,\n- )\n- # obtain feedback\n- bandit_feedback = dataset.obtain_batch_bandit_feedback(\n- n_rounds=n_rounds,\nmin_action_value=min_action_value,\nmax_action_value=max_action_value,\n+ random_state=random_state,\n)\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\nreturn bandit_feedback\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators_continuous.py",
"new_path": "tests/ope/test_dr_estimators_continuous.py",
"diff": "@@ -321,6 +321,9 @@ def test_continuous_ope_performance(kernel):\nreward_function = linear_reward_funcion_continuous\ndataset = SyntheticContinuousBanditDataset(\ndim_context=dim_context,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\nreward_function=reward_function,\nbehavior_policy_function=behavior_policy_function,\nrandom_state=random_state,\n@@ -328,9 +331,6 @@ def test_continuous_ope_performance(kernel):\n# obtain feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds,\n- reward_noise=reward_noise,\n- min_action_value=min_action_value,\n- max_action_value=max_action_value,\n)\ncontext = bandit_feedback[\"context\"]\naction_by_evaluation_policy = linear_synthetic_policy_continuous(context)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators_continuous.py",
"new_path": "tests/ope/test_ipw_estimators_continuous.py",
"diff": "@@ -355,6 +355,9 @@ def test_continuous_ope_performance(kernel):\nreward_function = linear_reward_funcion_continuous\ndataset = SyntheticContinuousBanditDataset(\ndim_context=dim_context,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\nreward_function=reward_function,\nbehavior_policy_function=behavior_policy_function,\nrandom_state=random_state,\n@@ -362,9 +365,6 @@ def test_continuous_ope_performance(kernel):\n# obtain feedback\nbandit_feedback = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds,\n- reward_noise=reward_noise,\n- min_action_value=min_action_value,\n- max_action_value=max_action_value,\n)\ncontext = bandit_feedback[\"context\"]\naction_by_evaluation_policy = linear_synthetic_policy_continuous(context)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix some tests to adjust the changes of SyntheticContinuousBanditDataset |
641,014 | 08.07.2021 11:12:53 | -32,400 | 900c4ffa4b66e35c14f7580328e95be9af7ab208 | fix some tests to adjust to the changes of SyntheticContinuousBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline_learner_continuous_performance.py",
"new_path": "tests/policy/test_offline_learner_continuous_performance.py",
"diff": "@@ -71,6 +71,10 @@ def test_offline_nn_policy_learner_performance(\n# synthetic data generator\ndataset = SyntheticContinuousBanditDataset(\ndim_context=dim_context,\n+ action_noise=action_noise,\n+ reward_noise=reward_noise,\n+ min_action_value=min_action_value,\n+ max_action_value=max_action_value,\nreward_function=linear_reward_funcion_continuous,\nbehavior_policy_function=linear_behavior_policy_continuous,\nrandom_state=i,\n@@ -90,17 +94,9 @@ def test_offline_nn_policy_learner_performance(\n# sample new training and test sets of synthetic logged bandit feedback\nbandit_feedback_train = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds,\n- action_noise=action_noise,\n- reward_noise=reward_noise,\n- min_action_value=min_action_value,\n- max_action_value=max_action_value,\n)\nbandit_feedback_test = dataset.obtain_batch_bandit_feedback(\nn_rounds=n_rounds,\n- action_noise=action_noise,\n- reward_noise=reward_noise,\n- min_action_value=min_action_value,\n- max_action_value=max_action_value,\n)\n# train the evaluation policy on the training set of the synthetic logged bandit feedback\nnn_policy.fit(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix some tests to adjust to the changes of SyntheticContinuousBanditDataset |
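The two commits above adapt callers to the refactored `SyntheticContinuousBanditDataset`, where the data-generating parameters (`action_noise`, `reward_noise`, `min_action_value`, `max_action_value`, plus the reward and behavior-policy functions) move into the constructor and `obtain_batch_bandit_feedback` keeps only `n_rounds`. A minimal usage sketch of the post-refactoring call pattern (the import path and argument values are illustrative assumptions, not taken from the diffs):

```python
from obp.dataset import SyntheticContinuousBanditDataset  # assumed import path

# the constructor now owns the data-generating configuration
dataset = SyntheticContinuousBanditDataset(
    dim_context=3,
    action_noise=1.0,
    reward_noise=1.0,
    min_action_value=-1.0,
    max_action_value=1.0,
    random_state=12345,
)
# after the refactoring, only the sample size is passed here
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
```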
641,014 | 08.07.2021 12:47:50 | -32,400 | bfa4466de043aa6c053424882ac06de10fa578c6 | implement weight clipping and fix some typos | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -8,6 +8,7 @@ from typing import Dict, Optional, Union\nimport numpy as np\nimport torch\n+from sklearn.utils import check_scalar\nfrom ..utils import (\nestimate_confidence_interval_by_bootstrap,\n@@ -169,7 +170,7 @@ class ReplayMethod(BaseOffPolicyEstimator):\n) -> torch.Tensor:\n\"\"\"Estimate policy value of an evaluation policy and return PyTorch Tensor.\nThis is intended for being used with NNPolicyLearner.\n- This is not implemnted for RM because it is indifferentiable.\n+ This is not implemented for RM because it is indifferentiable.\n\"\"\"\nraise NotImplementedError(\n\"This is not implemented because RM is indifferentiable\"\n@@ -257,6 +258,8 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nwhere :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by\na behavior policy :math:`\\\\pi_b`. :math:`w(x,a):=\\\\pi_e (a|x)/\\\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.\n:math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n+ When the weight-clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\lambda, w(x,a) \\\\}`\n+ where :math:`\\\\lambda (>0)` is a hyperparameter that decides a maximum allowed importance weight.\nIPW re-weights the rewards by the ratio of the evaluation policy and behavior policy (importance weight).\nWhen the behavior policy is known, IPW is unbiased and consistent for the true policy value.\n@@ -264,6 +267,10 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nParameters\n------------\n+ lambda_: float, default=np.inf\n+ A maximum possible value of the importance weight.\n+ When a positive finite value is given, then an importance weight larger than `lambda_` will be clipped.\n+\nestimator_name: str, default='ipw'.\nName of off-policy estimator.\n@@ -277,8 +284,18 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\n\"\"\"\n+ lambda_: float = np.inf\nestimator_name: str = \"ipw\"\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize Class.\"\"\"\n+ check_scalar(\n+ self.lambda_,\n+ name=\"lambda_\",\n+ target_type=(int, float),\n+ min_val=0.0,\n+ )\n+\ndef _estimate_round_rewards(\nself,\nreward: Union[np.ndarray, torch.Tensor],\n@@ -854,6 +871,8 @@ class DoublyRobust(BaseOffPolicyEstimator):\n:math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n:math:`\\\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.\n:math:`\\\\hat{q} (x_t,\\\\pi):= \\\\mathbb{E}_{a \\\\sim \\\\pi(a|x)}[\\\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\\\pi`.\n+ When the weight-clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\lambda, w(x,a) \\\\}`\n+ where :math:`\\\\lambda (>0)` is a hyperparameter that decides a maximum allowed importance weight.\nTo estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`,\nwhich supports several fitting methods specific to OPE such as *more robust doubly robust*.\n@@ -866,6 +885,10 @@ class DoublyRobust(BaseOffPolicyEstimator):\nParameters\n----------\n+ lambda_: float, default=np.inf\n+ A maximum possible value of the importance weight.\n+ When a positive finite value is given, then an importance weight larger than `lambda_` will be clipped.\n+\nestimator_name: str, 
default='dr'.\nName of off-policy estimator.\n@@ -879,8 +902,18 @@ class DoublyRobust(BaseOffPolicyEstimator):\n\"\"\"\n+ lambda_: float = np.inf\nestimator_name: str = \"dr\"\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize Class.\"\"\"\n+ check_scalar(\n+ self.lambda_,\n+ name=\"lambda_\",\n+ target_type=(int, float),\n+ min_val=0.0,\n+ )\n+\ndef _estimate_round_rewards(\nself,\nreward: Union[np.ndarray, torch.Tensor],\n@@ -1314,15 +1347,11 @@ class SwitchDoublyRobust(DoublyRobust):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.tau, (float, int)):\n- raise ValueError(\n- f\"switching hyperparameter must be float or integer, but {self.tau} is given\"\n- )\n- if self.tau != self.tau:\n- raise ValueError(\"switching hyperparameter must not be nan\")\n- if self.tau < 0.0:\n- raise ValueError(\n- f\"switching hyperparameter must be larger than or equal to zero, but {self.tau} is given\"\n+ check_scalar(\n+ self.tau,\n+ name=\"tau\",\n+ target_type=(float, int),\n+ min_val=0.0,\n)\ndef _estimate_round_rewards(\n@@ -1387,10 +1416,10 @@ class SwitchDoublyRobust(DoublyRobust):\n) -> torch.Tensor:\n\"\"\"Estimate policy value of an evaluation policy and return PyTorch Tensor.\nThis is intended for being used with NNPolicyLearner.\n- This is not implemnted because swithing is indifferentiable.\n+ This is not implemented because switching is indifferentiable.\n\"\"\"\nraise NotImplementedError(\n- \"This is not implemented for Swtich-DR because it is indifferentiable.\"\n+ \"This is not implemented for Switch-DR because it is indifferentiable.\"\n)\n@@ -1451,15 +1480,11 @@ class DoublyRobustWithShrinkage(DoublyRobust):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.lambda_, (float, int)):\n- raise ValueError(\n- f\"shrinkage hyperparameter must be float or integer, but {self.lambda_} is given\"\n- )\n- if self.lambda_ != self.lambda_:\n- raise ValueError(\"shrinkage hyperparameter must not be nan\")\n- if self.lambda_ < 0.0:\n- raise ValueError(\n- f\"shrinkage hyperparameter must be larger than or equal to zero, but {self.lambda_} is given\"\n+ check_scalar(\n+ self.lambda_,\n+ name=\"lambda_\",\n+ target_type=(float, int),\n+ min_val=0.0,\n)\ndef _estimate_round_rewards(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_all_estimators.py",
"new_path": "tests/ope/test_all_estimators.py",
"diff": "@@ -112,7 +112,7 @@ valid_input_of_estimation = [\nnp.ones(5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n- \"all argumnents are given and len_list > 1\",\n+ \"all arguments are given and len_list > 1\",\n),\n(\ngenerate_action_dist(5, 4, 1),\n@@ -121,7 +121,7 @@ valid_input_of_estimation = [\nnp.ones(5),\nnp.zeros(5, dtype=int),\nnp.zeros((5, 4, 1)),\n- \"all argumnents are given and len_list == 1\",\n+ \"all arguments are given and len_list == 1\",\n),\n(\ngenerate_action_dist(5, 4, 1),\n@@ -130,7 +130,7 @@ valid_input_of_estimation = [\nnp.ones(5),\nNone,\nnp.zeros((5, 4, 1)),\n- \"position argumnent is None\",\n+ \"position argument is None\",\n),\n]\n@@ -314,7 +314,7 @@ valid_input_of_estimation_tensor = [\ntorch.from_numpy(np.ones(5)),\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.from_numpy(np.zeros((5, 4, 3))),\n- \"all argumnents are given and len_list > 1\",\n+ \"all arguments are given and len_list > 1\",\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 1)),\n@@ -323,7 +323,7 @@ valid_input_of_estimation_tensor = [\ntorch.from_numpy(np.ones(5)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros((5, 4, 1))),\n- \"all argumnents are given and len_list == 1\",\n+ \"all arguments are given and len_list == 1\",\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 1)),\n@@ -332,7 +332,7 @@ valid_input_of_estimation_tensor = [\ntorch.from_numpy(np.ones(5)),\nNone,\ntorch.from_numpy(np.zeros((5, 4, 1))),\n- \"position argumnent is None\",\n+ \"position argument is None\",\n),\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -496,7 +496,7 @@ valid_input_of_dr_variants = [\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n0.5,\n- \"all argumnents are given and len_list > 1\",\n+ \"all arguments are given and len_list > 1\",\n)\n]\n@@ -540,7 +540,7 @@ valid_input_tensor_of_dr_variants = [\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n0.5,\n- \"all argumnents are given and len_list > 1\",\n+ \"all arguments are given and len_list > 1\",\n)\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators.py",
"new_path": "tests/ope/test_ipw_estimators.py",
"diff": "@@ -356,7 +356,7 @@ def test_ipw_using_random_evaluation_policy(\nif k in [\"reward\", \"action\", \"pscore\", \"position\"]\n}\ninput_dict[\"action_dist\"] = action_dist\n- # ipw estimtors can be used without estimated_rewards_by_reg_model\n+ # ipw estimators can be used without estimated_rewards_by_reg_model\nfor estimator in [ipw, snipw]:\nestimated_policy_value = estimator.estimate_policy_value(**input_dict)\nassert isinstance(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement weight clipping and fix some typos |
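The docstrings added by the last commit define weight clipping as w_hat(x,a) := min{lambda, w(x,a)} with w(x,a) = pi_e(a|x) / pi_b(a|x), applied inside the IPW and DR estimators. A minimal NumPy sketch of the clipped IPW policy-value estimate implied by that formula (a standalone function for illustration, not the library's `InverseProbabilityWeighting` class; variable names and the toy data are assumptions):

```python
import numpy as np


def clipped_ipw_policy_value(
    reward: np.ndarray,             # observed rewards r_t, shape (n_rounds,)
    behavior_pscore: np.ndarray,    # pi_b(a_t|x_t), shape (n_rounds,)
    evaluation_pscore: np.ndarray,  # pi_e(a_t|x_t), shape (n_rounds,)
    lambda_: float = np.inf,        # max allowed importance weight; np.inf disables clipping
) -> float:
    """Clipped IPW: V_hat = E_D[ min(lambda_, w(x,a)) * r ]."""
    importance_weight = evaluation_pscore / behavior_pscore
    clipped_weight = np.minimum(importance_weight, lambda_)
    return float(np.mean(clipped_weight * reward))


# toy usage with made-up propensities
rng = np.random.default_rng(12345)
r = rng.binomial(n=1, p=0.5, size=1000).astype(float)
pi_b = rng.uniform(0.1, 0.9, size=1000)
pi_e = rng.uniform(0.1, 0.9, size=1000)
print(clipped_ipw_policy_value(r, pi_b, pi_e, lambda_=10.0))
```

Setting `lambda_=np.inf` (the default added by the commit) recovers plain IPW, so the clipping is strictly opt-in.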