author (int64, 658 to 755k) | date (string, length 19) | timezone (int64, -46,800 to 43.2k) | hash (string, length 40) | message (string, length 5 to 490) | mods (list) | language (string, 20 classes) | license (string, 3 classes) | repo (string, length 5 to 68) | original_message (string, length 12 to 491) |
---|---|---|---|---|---|---|---|---|---|
641,014 | 08.07.2021 13:33:01 | -32,400 | 526c159c8c8e8f7a2a4e7647984ca5631a66d557 | fix tests of inits and add some descriptions | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -14,6 +14,82 @@ from obp.ope import (\n)\nfrom conftest import generate_action_dist\n+\n+invalid_input_of_dr_init = [\n+ (\n+ \"\",\n+ TypeError,\n+ r\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ None,\n+ TypeError,\n+ r\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (-1.0, ValueError, \"`lambda_`= -1.0, must be >= 0.0.\"),\n+]\n+\n+\[email protected](\n+ \"lambda_, err, description\",\n+ invalid_input_of_dr_init,\n+)\n+def test_dr_init_using_invalid_inputs(\n+ lambda_,\n+ err,\n+ description,\n+):\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = DoublyRobust(lambda_=lambda_)\n+\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = DoublyRobustWithShrinkage(lambda_=lambda_)\n+\n+\n+invalid_input_of_switch_dr_init = [\n+ (\n+ \"\",\n+ TypeError,\n+ r\"`tau` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ None,\n+ TypeError,\n+ r\"`tau` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (-1.0, ValueError, \"`tau`= -1.0, must be >= 0.0.\"),\n+]\n+\n+\[email protected](\n+ \"tau, err, description\",\n+ invalid_input_of_switch_dr_init,\n+)\n+def test_switch_dr_init_using_invalid_inputs(\n+ tau,\n+ err,\n+ description,\n+):\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = SwitchDoublyRobust(tau=tau)\n+\n+\n+valid_input_of_dr_init = [\n+ (3.0, \"float lambda_tau\"),\n+ (2, \"integer lambda_tau\"),\n+]\n+\n+\[email protected](\n+ \"lambda_tau, description\",\n+ valid_input_of_dr_init,\n+)\n+def test_shrinkage_using_valid_input_data(lambda_tau: float, description: str) -> None:\n+ _ = DoublyRobust(lambda_=lambda_tau)\n+ _ = DoublyRobustWithShrinkage(lambda_=lambda_tau)\n+ _ = SwitchDoublyRobust(lambda_=lambda_tau)\n+\n+\n# prepare instances\ndm = DirectMethod()\ndr = DoublyRobust()\n@@ -31,7 +107,7 @@ dr_estimators = [dr, dr_shrink_0, sndr, switch_dr_0]\ninvalid_input_of_dr = [\n(\ngenerate_action_dist(5, 4, 3),\n- None,\n+ None, #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -41,7 +117,7 @@ invalid_input_of_dr = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- None,\n+ None, #\nnp.ones(5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n@@ -51,7 +127,7 @@ invalid_input_of_dr = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- None,\n+ None, #\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n\"pscore must be ndarray\",\n@@ -62,12 +138,12 @@ invalid_input_of_dr = [\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n- None,\n+ None, #\n\"estimated_rewards_by_reg_model must be ndarray\",\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros(5, dtype=float),\n+ np.zeros(5, dtype=float), #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -76,7 +152,7 @@ invalid_input_of_dr = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros(5, dtype=int) - 1,\n+ np.zeros(5, dtype=int) - 1, #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -85,7 +161,7 @@ invalid_input_of_dr = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- \"4\",\n+ \"4\", #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -94,7 +170,7 @@ invalid_input_of_dr = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros((3, 2), dtype=int),\n+ np.zeros((3, 2), dtype=int), #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, 
size=5),\n@@ -103,7 +179,7 @@ invalid_input_of_dr = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros(5, dtype=int) + 8,\n+ np.zeros(5, dtype=int) + 8, #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -113,7 +189,7 @@ invalid_input_of_dr = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- \"4\",\n+ \"4\", #\nnp.ones(5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n@@ -122,7 +198,7 @@ invalid_input_of_dr = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- np.zeros((3, 2), dtype=int),\n+ np.zeros((3, 2), dtype=int), #\nnp.ones(5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n@@ -131,7 +207,7 @@ invalid_input_of_dr = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- np.zeros(4, dtype=int),\n+ np.zeros(4, dtype=int), #\nnp.ones(5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n@@ -141,7 +217,7 @@ invalid_input_of_dr = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- \"4\",\n+ \"4\", #\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n\"pscore must be ndarray\",\n@@ -150,7 +226,7 @@ invalid_input_of_dr = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- np.ones((5, 3)),\n+ np.ones((5, 3)), #\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n\"pscore must be 1-dimensional\",\n@@ -159,7 +235,7 @@ invalid_input_of_dr = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- np.ones(4),\n+ np.ones(4), #\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n\"action, reward, and pscore must be the same size.\",\n@@ -168,7 +244,7 @@ invalid_input_of_dr = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- np.arange(5),\n+ np.arange(5), #\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n\"pscore must be positive\",\n@@ -179,7 +255,7 @@ invalid_input_of_dr = [\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n- np.zeros((5, 4, 2)),\n+ np.zeros((5, 4, 2)), #\n\"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\",\n),\n(\n@@ -188,7 +264,7 @@ invalid_input_of_dr = [\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n- \"4\",\n+ \"4\", #\n\"estimated_rewards_by_reg_model must be ndarray\",\n),\n]\n@@ -234,7 +310,7 @@ def test_dr_using_invalid_input_data(\ninvalid_input_tensor_of_dr = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- None,\n+ None, #\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -244,7 +320,7 @@ invalid_input_tensor_of_dr = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- None,\n+ None, #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n@@ -254,7 +330,7 @@ invalid_input_tensor_of_dr = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.int64),\n- None,\n+ None, #\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n\"pscore must be Tensor\",\n@@ -265,12 +341,12 @@ invalid_input_tensor_of_dr = [\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n- None,\n+ None, #\n\"estimated_rewards_by_reg_model must be Tensor\",\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros(5, dtype=torch.float32),\n+ torch.zeros(5, dtype=torch.float32), #\ntorch.zeros(5, 
dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -279,7 +355,7 @@ invalid_input_tensor_of_dr = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros(5, dtype=torch.int64) - 1,\n+ torch.zeros(5, dtype=torch.int64) - 1, #\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -288,7 +364,7 @@ invalid_input_tensor_of_dr = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- \"4\",\n+ \"4\", #\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -297,7 +373,7 @@ invalid_input_tensor_of_dr = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros((3, 2), dtype=torch.int64),\n+ torch.zeros((3, 2), dtype=torch.int64), #\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -306,7 +382,7 @@ invalid_input_tensor_of_dr = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros(5, dtype=torch.int64) + 8,\n+ torch.zeros(5, dtype=torch.int64) + 8, #\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -316,7 +392,7 @@ invalid_input_tensor_of_dr = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- \"4\",\n+ \"4\", #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n@@ -325,7 +401,7 @@ invalid_input_tensor_of_dr = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- torch.zeros((3, 2), dtype=torch.int64),\n+ torch.zeros((3, 2), dtype=torch.int64), #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n@@ -334,7 +410,7 @@ invalid_input_tensor_of_dr = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- torch.zeros(4, dtype=torch.int64),\n+ torch.zeros(4, dtype=torch.int64), #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n@@ -344,7 +420,7 @@ invalid_input_tensor_of_dr = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.int64),\n- \"4\",\n+ \"4\", #\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n\"pscore must be Tensor\",\n@@ -353,7 +429,7 @@ invalid_input_tensor_of_dr = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.int64),\n- torch.ones((5, 3)),\n+ torch.ones((5, 3)), #\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n\"pscore must be 1-dimensional\",\n@@ -362,7 +438,7 @@ invalid_input_tensor_of_dr = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.int64),\n- torch.ones(4),\n+ torch.ones(4), #\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n\"action, reward, and pscore must be the same size.\",\n@@ -371,7 +447,7 @@ invalid_input_tensor_of_dr = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.int64),\n- torch.from_numpy(np.arange(5)),\n+ torch.from_numpy(np.arange(5)), #\ntorch.from_numpy(np.random.choice(3, size=5)),\ntorch.zeros((5, 4, 3)),\n\"pscore must be positive\",\n@@ -382,7 +458,7 @@ invalid_input_tensor_of_dr = [\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n- 
torch.zeros((5, 4, 2)),\n+ torch.zeros((5, 4, 2)), #\n\"estimated_rewards_by_reg_model.shape must be the same as action_dist.shape\",\n),\n(\n@@ -391,7 +467,7 @@ invalid_input_tensor_of_dr = [\ntorch.zeros(5, dtype=torch.int64),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n- \"4\",\n+ \"4\", #\n\"estimated_rewards_by_reg_model must be Tensor\",\n),\n]\n@@ -423,69 +499,6 @@ def test_dr_using_invalid_input_tensor_data(\n)\n-# switch-dr\n-\n-invalid_input_of_switch = [\n- (\"a\", \"switching hyperparameter must be float or integer\"),\n- (-1.0, \"switching hyperparameter must be larger than or equal to zero\"),\n- (np.nan, \"switching hyperparameter must not be nan\"),\n-]\n-\n-\[email protected](\n- \"tau, description\",\n- invalid_input_of_switch,\n-)\n-def test_switch_using_invalid_input_data(tau: float, description: str) -> None:\n- with pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = SwitchDoublyRobust(tau=tau)\n-\n-\n-valid_input_of_switch = [\n- (3.0, \"float tau\"),\n- (2, \"integer tau\"),\n-]\n-\n-\[email protected](\n- \"tau, description\",\n- valid_input_of_switch,\n-)\n-def test_switch_using_valid_input_data(tau: float, description: str) -> None:\n- _ = SwitchDoublyRobust(tau=tau)\n-\n-\n-# dr-os\n-invalid_input_of_shrinkage = [\n- (\"a\", \"shrinkage hyperparameter must be float or integer\"),\n- (-1.0, \"shrinkage hyperparameter must be larger than or equal to zero\"),\n- (np.nan, \"shrinkage hyperparameter must not be nan\"),\n-]\n-\n-\[email protected](\n- \"lambda_, description\",\n- invalid_input_of_shrinkage,\n-)\n-def test_shrinkage_using_invalid_input_data(lambda_: float, description: str) -> None:\n- with pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = DoublyRobustWithShrinkage(lambda_=lambda_)\n-\n-\n-valid_input_of_shrinkage = [\n- (3.0, \"float lambda_\"),\n- (2, \"integer lambda_\"),\n-]\n-\n-\[email protected](\n- \"lambda_, description\",\n- valid_input_of_shrinkage,\n-)\n-def test_shrinkage_using_valid_input_data(lambda_: float, description: str) -> None:\n- _ = DoublyRobustWithShrinkage(lambda_=lambda_)\n-\n-\n# dr variants\nvalid_input_of_dr_variants = [\n(\n@@ -589,7 +602,7 @@ def test_dr_using_random_evaluation_policy(\n}\ninput_dict[\"action_dist\"] = action_dist\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n- # dr estimtors require all arguments\n+ # dr estimators require all arguments\nfor estimator in dr_estimators:\nestimated_policy_value = estimator.estimate_policy_value(**input_dict)\nassert isinstance(\n@@ -619,13 +632,13 @@ def test_dr_using_random_evaluation_policy(\ninput_tensor_dict[\"estimated_rewards_by_reg_model\"] = torch.from_numpy(\nexpected_reward\n)\n- # dr estimtors require all arguments\n+ # dr estimators require all arguments\nfor estimator in dr_estimators:\nif estimator.estimator_name == \"switch-dr\":\nwith pytest.raises(\nNotImplementedError,\nmatch=re.escape(\n- \"This is not implemented for Swtich-DR because it is indifferentiable.\"\n+ \"This is not implemented for Switch-DR because it is indifferentiable.\"\n),\n):\n_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)\n@@ -646,7 +659,7 @@ def test_dr_using_random_evaluation_policy(\nwith pytest.raises(\nNotImplementedError,\nmatch=re.escape(\n- \"This is not implemented for Swtich-DR because it is indifferentiable.\"\n+ \"This is not implemented for Switch-DR because it is indifferentiable.\"\n),\n):\n_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators.py",
"new_path": "tests/ope/test_ipw_estimators.py",
"diff": "@@ -11,6 +11,25 @@ from obp.ope import (\n)\nfrom conftest import generate_action_dist\n+\n+def test_ipw_init():\n+ # lambda_\n+ with pytest.raises(\n+ TypeError,\n+ match=r\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ):\n+ InverseProbabilityWeighting(lambda_=None)\n+\n+ with pytest.raises(\n+ TypeError,\n+ match=r\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ):\n+ InverseProbabilityWeighting(lambda_=\"\")\n+\n+ with pytest.raises(ValueError, match=r\"`lambda_`= -1.0, must be >= 0.0.\"):\n+ InverseProbabilityWeighting(lambda_=-1.0)\n+\n+\n# prepare ipw instances\nipw = InverseProbabilityWeighting()\nsnipw = SelfNormalizedInverseProbabilityWeighting()\n@@ -20,7 +39,7 @@ snipw = SelfNormalizedInverseProbabilityWeighting()\ninvalid_input_of_ipw = [\n(\ngenerate_action_dist(5, 4, 3),\n- None,\n+ None, #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -29,7 +48,7 @@ invalid_input_of_ipw = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- None,\n+ None, #\nnp.ones(5),\nnp.random.choice(3, size=5),\n\"reward must be ndarray\",\n@@ -38,13 +57,13 @@ invalid_input_of_ipw = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- None,\n+ None, #\nnp.random.choice(3, size=5),\n\"pscore must be ndarray\",\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros(5, dtype=float),\n+ np.zeros(5, dtype=float), #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -52,7 +71,7 @@ invalid_input_of_ipw = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros(5, dtype=int) - 1,\n+ np.zeros(5, dtype=int) - 1, #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -60,7 +79,7 @@ invalid_input_of_ipw = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- \"4\",\n+ \"4\", #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -68,7 +87,7 @@ invalid_input_of_ipw = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros((3, 2), dtype=int),\n+ np.zeros((3, 2), dtype=int), #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -76,7 +95,7 @@ invalid_input_of_ipw = [\n),\n(\ngenerate_action_dist(5, 4, 3),\n- np.zeros(5, dtype=int) + 8,\n+ np.zeros(5, dtype=int) + 8, #\nnp.zeros(5, dtype=int),\nnp.ones(5),\nnp.random.choice(3, size=5),\n@@ -85,7 +104,7 @@ invalid_input_of_ipw = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- \"4\",\n+ \"4\", #\nnp.ones(5),\nnp.random.choice(3, size=5),\n\"reward must be ndarray\",\n@@ -93,7 +112,7 @@ invalid_input_of_ipw = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- np.zeros((3, 2), dtype=int),\n+ np.zeros((3, 2), dtype=int), #\nnp.ones(5),\nnp.random.choice(3, size=5),\n\"reward must be 1-dimensional\",\n@@ -101,7 +120,7 @@ invalid_input_of_ipw = [\n(\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\n- np.zeros(4, dtype=int),\n+ np.zeros(4, dtype=int), #\nnp.ones(5),\nnp.random.choice(3, size=5),\n\"action and reward must be the same size.\",\n@@ -110,7 +129,7 @@ invalid_input_of_ipw = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- \"4\",\n+ \"4\", #\nnp.random.choice(3, size=5),\n\"pscore must be ndarray\",\n),\n@@ -118,7 +137,7 @@ invalid_input_of_ipw = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- np.ones((5, 3)),\n+ np.ones((5, 3)), #\nnp.random.choice(3, size=5),\n\"pscore must be 1-dimensional\",\n),\n@@ -126,7 
+145,7 @@ invalid_input_of_ipw = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- np.ones(4),\n+ np.ones(4), #\nnp.random.choice(3, size=5),\n\"action, reward, and pscore must be the same size.\",\n),\n@@ -134,7 +153,7 @@ invalid_input_of_ipw = [\ngenerate_action_dist(5, 4, 3),\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\n- np.arange(5),\n+ np.arange(5), #\nnp.random.choice(3, size=5),\n\"pscore must be positive\",\n),\n@@ -191,7 +210,7 @@ def test_ipw_using_invalid_input_data(\ninvalid_input_tensor_of_ipw = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- None,\n+ None, #\ntorch.zeros(5, dtype=torch.float32),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -200,7 +219,7 @@ invalid_input_tensor_of_ipw = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- None,\n+ None, #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"reward must be Tensor\",\n@@ -209,13 +228,13 @@ invalid_input_tensor_of_ipw = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.float32),\n- None,\n+ None, #\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"pscore must be Tensor\",\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros(5, dtype=torch.float64),\n+ torch.zeros(5, dtype=torch.float64), #\ntorch.zeros(5, dtype=torch.float32),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -223,7 +242,7 @@ invalid_input_tensor_of_ipw = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros(5, dtype=torch.float64) - 1,\n+ torch.zeros(5, dtype=torch.float64) - 1, #\ntorch.zeros(5, dtype=torch.float32),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -231,7 +250,7 @@ invalid_input_tensor_of_ipw = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- \"4\",\n+ \"4\", #\ntorch.zeros(5, dtype=torch.float32),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -239,7 +258,7 @@ invalid_input_tensor_of_ipw = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros((3, 2), dtype=torch.int64),\n+ torch.zeros((3, 2), dtype=torch.int64), #\ntorch.zeros(5, dtype=torch.float32),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -247,7 +266,7 @@ invalid_input_tensor_of_ipw = [\n),\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\n- torch.zeros(5, dtype=torch.int64) + 8,\n+ torch.zeros(5, dtype=torch.int64) + 8, #\ntorch.zeros(5, dtype=torch.float32),\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n@@ -256,7 +275,7 @@ invalid_input_tensor_of_ipw = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- \"4\",\n+ \"4\", #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"reward must be Tensor\",\n@@ -264,7 +283,7 @@ invalid_input_tensor_of_ipw = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- torch.zeros((3, 2), dtype=torch.float32),\n+ torch.zeros((3, 2), dtype=torch.float32), #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"reward must be 1-dimensional\",\n@@ -272,7 +291,7 @@ invalid_input_tensor_of_ipw = [\n(\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\n- torch.zeros(4, dtype=torch.float32),\n+ torch.zeros(4, dtype=torch.float32), #\ntorch.ones(5),\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"action and reward must be the same 
size.\",\n@@ -281,7 +300,7 @@ invalid_input_tensor_of_ipw = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.float32),\n- \"4\",\n+ \"4\", #\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"pscore must be Tensor\",\n),\n@@ -289,7 +308,7 @@ invalid_input_tensor_of_ipw = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.float32),\n- torch.ones((5, 3)),\n+ torch.ones((5, 3)), #\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"pscore must be 1-dimensional\",\n),\n@@ -297,7 +316,7 @@ invalid_input_tensor_of_ipw = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.float32),\n- torch.ones(4),\n+ torch.ones(4), #\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"action, reward, and pscore must be the same size.\",\n),\n@@ -305,7 +324,7 @@ invalid_input_tensor_of_ipw = [\ntorch.from_numpy(generate_action_dist(5, 4, 3)),\ntorch.zeros(5, dtype=torch.int64),\ntorch.zeros(5, dtype=torch.float32),\n- torch.from_numpy(np.arange(5)),\n+ torch.from_numpy(np.arange(5)), #\ntorch.from_numpy(np.random.choice(3, size=5)),\n\"pscore must be positive\",\n),\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix tests of inits and add some descriptions |
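
The diff above replaces several ad-hoc hyperparameter tests with parametrized `pytest.raises(..., match=...)` checks against the messages produced by scikit-learn style scalar validation. Below is a minimal, self-contained sketch of that pattern; `ClippedEstimator` is a hypothetical stand-in for the obp estimator classes, not part of the commit.

```python
from dataclasses import dataclass

import pytest


@dataclass
class ClippedEstimator:
    """Toy estimator whose `lambda_` must be a non-negative int or float."""

    lambda_: float = float("inf")

    def __post_init__(self) -> None:
        if not isinstance(self.lambda_, (int, float)):
            raise TypeError(
                "`lambda_` must be an instance of (<class 'int'>, <class 'float'>), "
                f"not {type(self.lambda_)}."
            )
        if self.lambda_ < 0.0:
            raise ValueError(f"`lambda_`= {self.lambda_}, must be >= 0.0.")


# (candidate value, expected exception, regex fragment of the expected message)
invalid_input_of_init = [
    ("", TypeError, "must be an instance of"),
    (None, TypeError, "must be an instance of"),
    (-1.0, ValueError, "must be >= 0"),
]


@pytest.mark.parametrize("lambda_, err, description", invalid_input_of_init)
def test_init_using_invalid_inputs(lambda_, err, description):
    # same pattern as the tests added in the diff: the expected error type and
    # a fragment of its message are part of the parametrization
    with pytest.raises(err, match=description):
        _ = ClippedEstimator(lambda_=lambda_)
```
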
641,014 | 08.07.2021 19:45:12 | -32,400 | cbc33686e8cf2c60144cce67ab7452feb4b146b2 | add checks wrt np.nan | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -258,7 +258,7 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nwhere :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by\na behavior policy :math:`\\\\pi_b`. :math:`w(x,a):=\\\\pi_e (a|x)/\\\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.\n:math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n- When the weight-clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\lambda, w(x,a) \\\\}`\n+ When the weight-clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\\\lambda, w(x,a) \\\\}`\nwhere :math:`\\\\lambda (>0)` is a hyperparameter that decides a maximum allowed importance weight.\nIPW re-weights the rewards by the ratio of the evaluation policy and behavior policy (importance weight).\n@@ -269,7 +269,7 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\n------------\nlambda_: float, default=np.inf\nA maximum possible value of the importance weight.\n- When a positive finite value is given, then an importance weight larger than `lambda_` will be clipped.\n+ When a positive finite value is given, then importance weights larger than `lambda_` will be clipped.\nestimator_name: str, default='ipw'.\nName of off-policy estimator.\n@@ -295,6 +295,8 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\ntarget_type=(int, float),\nmin_val=0.0,\n)\n+ if self.lambda_ != self.lambda_:\n+ raise ValueError(\"lambda_ must not be nan\")\ndef _estimate_round_rewards(\nself,\n@@ -874,7 +876,7 @@ class DoublyRobust(BaseOffPolicyEstimator):\n:math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n:math:`\\\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.\n:math:`\\\\hat{q} (x_t,\\\\pi):= \\\\mathbb{E}_{a \\\\sim \\\\pi(a|x)}[\\\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\\\pi`.\n- When the weight-clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\lambda, w(x,a) \\\\}`\n+ When the weight-clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\\\lambda, w(x,a) \\\\}`\nwhere :math:`\\\\lambda (>0)` is a hyperparameter that decides a maximum allowed importance weight.\nTo estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`,\n@@ -890,7 +892,7 @@ class DoublyRobust(BaseOffPolicyEstimator):\n----------\nlambda_: float, default=np.inf\nA maximum possible value of the importance weight.\n- When a positive finite value is given, then an importance weight larger than `lambda_` will be clipped.\n+ When a positive finite value is given, then importance weights larger than `lambda_` will be clipped.\nDoublyRobust with a finite positive `lambda_` corresponds to the Doubly Robust with pessimistic shrinkage stated in Su et al.(2020).\nestimator_name: str, default='dr'.\n@@ -920,6 +922,8 @@ class DoublyRobust(BaseOffPolicyEstimator):\ntarget_type=(int, float),\nmin_val=0.0,\n)\n+ if self.lambda_ != self.lambda_:\n+ raise ValueError(\"lambda_ must not be nan\")\ndef _estimate_round_rewards(\nself,\n@@ -1363,6 +1367,8 @@ class SwitchDoublyRobust(DoublyRobust):\ntarget_type=(int, float),\nmin_val=0.0,\n)\n+ if self.tau != self.tau:\n+ raise 
ValueError(\"tau must not be nan\")\ndef _estimate_round_rewards(\nself,\n@@ -1496,6 +1502,8 @@ class DoublyRobustWithShrinkage(DoublyRobust):\ntarget_type=(int, float),\nmin_val=0.0,\n)\n+ if self.lambda_ != self.lambda_:\n+ raise ValueError(\"lambda_ must not be nan\")\ndef _estimate_round_rewards(\nself,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -27,6 +27,7 @@ invalid_input_of_dr_init = [\nr\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n),\n(-1.0, ValueError, \"`lambda_`= -1.0, must be >= 0.0.\"),\n+ (np.nan, ValueError, \"lambda_ must not be nan\"),\n]\n@@ -58,6 +59,7 @@ invalid_input_of_switch_dr_init = [\nr\"`tau` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n),\n(-1.0, ValueError, \"`tau`= -1.0, must be >= 0.0.\"),\n+ (np.nan, ValueError, \"tau must not be nan\"),\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators.py",
"new_path": "tests/ope/test_ipw_estimators.py",
"diff": "@@ -29,6 +29,9 @@ def test_ipw_init():\nwith pytest.raises(ValueError, match=r\"`lambda_`= -1.0, must be >= 0.0.\"):\nInverseProbabilityWeighting(lambda_=-1.0)\n+ with pytest.raises(ValueError, match=r\"lambda_ must not be nan\"):\n+ InverseProbabilityWeighting(lambda_=np.nan)\n+\n# prepare ipw instances\nipw = InverseProbabilityWeighting()\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add checks wrt np.nan |
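
The guards added in this commit rely on the IEEE 754 property that NaN is the only value not equal to itself, so `x != x` is true exactly when `x` is NaN. A small stand-alone illustration follows; the `check_not_nan` helper is hypothetical, not an obp function.

```python
import math


def check_not_nan(value: float, name: str) -> None:
    # `value != value` is True only for NaN; math.isnan(value) is the
    # more explicit equivalent
    if value != value:
        raise ValueError(f"{name} must not be nan")


check_not_nan(1.0, "lambda_")  # passes silently
assert (float("nan") != float("nan")) == math.isnan(float("nan"))
try:
    check_not_nan(float("nan"), "lambda_")
except ValueError as err:
    print(err)  # -> lambda_ must not be nan
```
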
641,014 | 09.07.2021 19:21:05 | -32,400 | ebc94523b642e1c677e35c5946c2380bd835c0b3 | implement _estimate_mse_upper_bound method | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -537,6 +537,71 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nrandom_state=random_state,\n)\n+ def _estimate_mse_upper_bound(\n+ self,\n+ reward: np.ndarray,\n+ action: np.ndarray,\n+ pscore: np.ndarray,\n+ action_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ max_reward_value: Optional[Union[int, float]] = None,\n+ ) -> float:\n+ \"\"\"Estimate the upper bound of the mean-squared-error of IPW with a given clipping hyperparameter.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\n+ max_reward_value: int or float, default=None\n+ A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ If None is given, `reward.max()` is used.\n+\n+ Returns\n+ ----------\n+ estimated_mse_upper_bound: float\n+ Estimated upper bound of MSE with a given clipping hyperparameter `lambda_`.\n+ This is estimated using the automatic hyperparameter tuning procedure\n+ based on Section 4.2 of Wang et al.(2017).\n+\n+ \"\"\"\n+ if max_reward_value is None:\n+ max_reward_value = reward.max()\n+\n+ n_rounds = reward.shape[0]\n+ # estimate the variance of IPW with clipping\n+ var_hat = np.var(\n+ self._estimate_round_rewards(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ position=position,\n+ )\n+ )\n+ var_hat /= n_rounds\n+\n+ # estimate the upper bound of the bias of IPW with clipping\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ iw_hat = np.minimum(iw, self.lambda_)\n+ bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ bias_upper_bound = bias_upper_bound_arr.mean()\n+\n+ estimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n+ return estimated_mse_upper_bound\n+\n@dataclass\nclass SelfNormalizedInverseProbabilityWeighting(InverseProbabilityWeighting):\n@@ -1211,6 +1276,76 @@ class DoublyRobust(BaseOffPolicyEstimator):\nrandom_state=random_state,\n)\n+ def _estimate_mse_upper_bound(\n+ self,\n+ reward: np.ndarray,\n+ action: np.ndarray,\n+ pscore: np.ndarray,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ max_reward_value: Optional[Union[int, float]] = None,\n+ ) -> float:\n+ \"\"\"Estimate the upper bound of the mean-squared-error of DR with a given clipping hyperparameter.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice 
probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\n+ Expected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ max_reward_value: int or float, default=None\n+ A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ If None is given, `reward.max()` is used.\n+\n+ Returns\n+ ----------\n+ estimated_mse_upper_bound: float\n+ Estimated upper bound of MSE with a given clipping hyperparameter `lambda_`.\n+ This is estimated using the automatic hyperparameter tuning procedure\n+ based on Section 4.2 of Wang et al.(2017).\n+\n+ \"\"\"\n+ if max_reward_value is None:\n+ max_reward_value = reward.max()\n+\n+ n_rounds = reward.shape[0]\n+ # estimate the variance of DR with clipping\n+ var_hat = np.var(\n+ self._estimate_round_rewards(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\n+ )\n+ var_hat /= n_rounds\n+\n+ # estimate the upper bound of the bias of DR with clipping\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ iw_hat = np.minimum(iw, self.lambda_)\n+ bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ bias_upper_bound = bias_upper_bound_arr.mean()\n+\n+ estimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n+ return estimated_mse_upper_bound\n+\n@dataclass\nclass SelfNormalizedDoublyRobust(DoublyRobust):\n@@ -1339,7 +1474,7 @@ class SwitchDoublyRobust(DoublyRobust):\nParameters\n----------\n- tau: float, default=1\n+ tau: float, default=np.inf\nSwitching hyperparameter. 
When importance weight is larger than this parameter, the DM estimator is applied, otherwise the DR estimator is applied.\nThis hyperparameter should be larger than or equal to 0., otherwise it is meaningless.\n@@ -1356,7 +1491,7 @@ class SwitchDoublyRobust(DoublyRobust):\n\"\"\"\n- tau: float = 1.0\n+ tau: float = np.inf\nestimator_name: str = \"switch-dr\"\ndef __post_init__(self) -> None:\n@@ -1379,7 +1514,7 @@ class SwitchDoublyRobust(DoublyRobust):\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n**kwargs,\n- ) -> float:\n+ ) -> np.ndarray:\n\"\"\"Estimate rewards for each round.\nParameters\n@@ -1430,7 +1565,8 @@ class SwitchDoublyRobust(DoublyRobust):\nself,\n**kwargs,\n) -> torch.Tensor:\n- \"\"\"Estimate policy value of an evaluation policy and return PyTorch Tensor.\n+ \"\"\"\n+ Estimate policy value of an evaluation policy and return PyTorch Tensor.\nThis is intended for being used with NNPolicyLearner.\nThis is not implemented because switching is indifferentiable.\n\"\"\"\n@@ -1438,6 +1574,76 @@ class SwitchDoublyRobust(DoublyRobust):\n\"This is not implemented for Switch-DR because it is indifferentiable.\"\n)\n+ def _estimate_mse_upper_bound(\n+ self,\n+ reward: np.ndarray,\n+ action: np.ndarray,\n+ pscore: np.ndarray,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ max_reward_value: Optional[Union[int, float]] = None,\n+ ) -> float:\n+ \"\"\"Estimate the upper bound of the mean-squared-error of Switch-DR with a given hyperparameter.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\n+ Expected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\n+ max_reward_value: int or float, default=None\n+ A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ If None is given, `reward.max()` is used.\n+\n+ Returns\n+ ----------\n+ estimated_mse_upper_bound: float\n+ Estimated upper bound of MSE with a given switching hyperparameter `tau`.\n+ This is estimated using the automatic hyperparameter tuning procedure\n+ based on Section 4.2 of Wang et al.(2017).\n+\n+ \"\"\"\n+ if max_reward_value is None:\n+ max_reward_value = reward.max()\n+\n+ n_rounds = reward.shape[0]\n+ # estimate the variance of Switch-DR (Eq.(8) of Wang et al.(2017))\n+ var_hat = np.var(\n+ self._estimate_round_rewards(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\n+ )\n+ var_hat /= n_rounds\n+\n+ # estimate the upper bound of the bias of Switch-DR\n+ iw = action_dist[np.arange(n_rounds), action, position] / 
pscore\n+ iw_hat = iw * np.array(iw <= self.tau, dtype=int)\n+ bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ bias_upper_bound = bias_upper_bound_arr.mean()\n+\n+ estimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n+ return estimated_mse_upper_bound\n+\n@dataclass\nclass DoublyRobustWithShrinkage(DoublyRobust):\n@@ -1567,3 +1773,73 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nestimated_rewards += shrinkage_weight * (reward - q_hat_factual)\nreturn estimated_rewards\n+\n+ def _estimate_mse_upper_bound(\n+ self,\n+ reward: np.ndarray,\n+ action: np.ndarray,\n+ pscore: np.ndarray,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ max_reward_value: Optional[Union[int, float]] = None,\n+ ) -> float:\n+ \"\"\"Estimate the upper bound of the mean-squared-error of DR with a given hyperparameter.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Action choice probabilities by a behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities by the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\n+ Expected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Positions of each round in the given logged bandit feedback.\n+\n+ max_reward_value: int or float, default=None\n+ A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ If None is given, `reward.max()` is used.\n+\n+ Returns\n+ ----------\n+ estimated_mse_upper_bound: float\n+ Estimated upper bound of MSE with a given shrinkage hyperparameter `lambda_`.\n+ This is estimated using the automatic hyperparameter tuning procedure\n+ based on Section 4.2 of Wang et al.(2017).\n+\n+ \"\"\"\n+ if max_reward_value is None:\n+ max_reward_value = reward.max()\n+\n+ n_rounds = reward.shape[0]\n+ # estimate the variance of DRos\n+ var_hat = np.var(\n+ self._estimate_round_rewards(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\n+ )\n+ var_hat /= n_rounds\n+\n+ # estimate the upper bound of the bias of DRos\n+ iw = action_dist[np.arange(n_rounds), action, position] / pscore\n+ iw_hat = (self.lambda_ * iw) / (iw ** 2 + self.lambda_)\n+ bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ bias_upper_bound = bias_upper_bound_arr.mean()\n+\n+ estimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n+ return estimated_mse_upper_bound\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement _estimate_mse_upper_bound method |
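
The new `_estimate_mse_upper_bound` methods all follow the same recipe visible in the diff: the sample variance of the per-round estimates divided by the number of rounds, plus the square of an upper bound on the bias introduced by clipping, switching, or shrinkage. The sketch below reproduces that recipe for the clipped-IPW case on synthetic data; it uses no obp imports, the data are made up, and the exact per-round estimate (`iw_hat * reward`) is a plausible form inferred from the class docstring rather than copied from the library.

```python
import numpy as np

rng = np.random.default_rng(0)
n_rounds, lambda_ = 1000, 5.0
reward = rng.binomial(1, 0.3, size=n_rounds).astype(float)
pscore = rng.uniform(0.1, 1.0, size=n_rounds)  # behavior policy propensities
pi_e = rng.uniform(0.1, 1.0, size=n_rounds)    # evaluation policy probabilities

iw = pi_e / pscore              # importance weights
iw_hat = np.minimum(iw, lambda_)  # clipped weights, w_hat = min(lambda, w)

# variance term: sample variance of the per-round estimates over n_rounds
round_rewards = iw_hat * reward
var_hat = np.var(round_rewards) / n_rounds

# bias term: upper bound on the bias caused by clipping, here via the maximum
# reward as in this commit (a later commit switches to a direct estimate)
max_reward_value = reward.max()
bias_upper_bound = ((iw - iw_hat) * max_reward_value).mean()

estimated_mse_upper_bound = var_hat + bias_upper_bound ** 2
print(estimated_mse_upper_bound)
```
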
641,014 | 09.07.2021 19:28:33 | -32,400 | 0a8b7edc3d6ea6e9043bce15bce1fb5493cc92bb | add some tests and rename variables | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -207,9 +207,9 @@ def test_dr_tuning_init_using_valid_input_data(lambdas_taus, description):\ndm = DirectMethod()\ndr = DoublyRobust()\ndr_tuning = DoublyRobustTuning(lambdas=[1, 100])\n-dr_shrink_0 = DoublyRobustWithShrinkage(lambda_=0.0)\n-dr_shrink_tuning = DoublyRobustWithShrinkageTuning(lambdas=[1, 100])\n-dr_shrink_max = DoublyRobustWithShrinkage(lambda_=1e10)\n+dr_os_0 = DoublyRobustWithShrinkage(lambda_=0.0)\n+dr_os_tuning = DoublyRobustWithShrinkageTuning(lambdas=[1, 100])\n+dr_os_max = DoublyRobustWithShrinkage(lambda_=1e10)\nsndr = SelfNormalizedDoublyRobust()\nswitch_dr_0 = SwitchDoublyRobust(tau=0.0)\nswitch_dr_tuning = SwitchDoublyRobustTuning(taus=[1, 100])\n@@ -218,8 +218,8 @@ switch_dr_max = SwitchDoublyRobust(tau=1e10)\ndr_estimators = [\ndr,\ndr_tuning,\n- dr_shrink_0,\n- dr_shrink_tuning,\n+ dr_os_0,\n+ dr_os_tuning,\nsndr,\nswitch_dr_0,\nswitch_dr_tuning,\n@@ -654,8 +654,10 @@ def test_dr_variants_using_valid_input_data(\n) -> None:\n# check dr variants\nswitch_dr = SwitchDoublyRobust(tau=hyperparameter)\n+ switch_dr_tuning = SwitchDoublyRobustTuning(taus=[hyperparameter, hyperparameter * 10])\ndr_os = DoublyRobustWithShrinkage(lambda_=hyperparameter)\n- for estimator in [switch_dr, dr_os]:\n+ dr_os_tuning = DoublyRobustWithShrinkageTuning(lambdas=[hyperparameter, hyperparameter * 10])\n+ for estimator in [switch_dr, switch_dr_tuning, dr_os, dr_os_tuning]:\nest = estimator.estimate_policy_value(\naction_dist=action_dist,\naction=action,\n@@ -838,7 +840,7 @@ def test_boundedness_of_sndr_using_random_evaluation_policy(\n), f\"estimated policy value of sndr should be smaller than or equal to 2 (because of its 2-boundedness), but the value is: {estimated_policy_value.item()}\"\n-def test_dr_shrinkage_using_random_evaluation_policy(\n+def test_dr_osage_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n\"\"\"\n@@ -856,13 +858,13 @@ def test_dr_shrinkage_using_random_evaluation_policy(\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\ndm_value = dm.estimate_policy_value(**input_dict)\ndr_value = dr.estimate_policy_value(**input_dict)\n- dr_shrink_0_value = dr_shrink_0.estimate_policy_value(**input_dict)\n- dr_shrink_max_value = dr_shrink_max.estimate_policy_value(**input_dict)\n+ dr_os_0_value = dr_os_0.estimate_policy_value(**input_dict)\n+ dr_os_max_value = dr_os_max.estimate_policy_value(**input_dict)\nassert (\n- dm_value == dr_shrink_0_value\n+ dm_value == dr_os_0_value\n), \"DoublyRobustWithShrinkage (lambda=0) should be the same as DirectMethod\"\nassert (\n- np.abs(dr_value - dr_shrink_max_value) < 1e-5\n+ np.abs(dr_value - dr_os_max_value) < 1e-5\n), \"DoublyRobustWithShrinkage (lambda=inf) should be almost the same as DoublyRobust\"\n# prepare input dict\n@@ -877,15 +879,15 @@ def test_dr_shrinkage_using_random_evaluation_policy(\n)\ndm_value = dm.estimate_policy_value_tensor(**input_tensor_dict)\ndr_value = dr.estimate_policy_value_tensor(**input_tensor_dict)\n- dr_shrink_0_value = dr_shrink_0.estimate_policy_value_tensor(**input_tensor_dict)\n- dr_shrink_max_value = dr_shrink_max.estimate_policy_value_tensor(\n+ dr_os_0_value = dr_os_0.estimate_policy_value_tensor(**input_tensor_dict)\n+ dr_os_max_value = dr_os_max.estimate_policy_value_tensor(\n**input_tensor_dict\n)\nassert (\n- dm_value.item() == dr_shrink_0_value.item()\n+ dm_value.item() == dr_os_0_value.item()\n), \"DoublyRobustWithShrinkage (lambda=0) should be the same as DirectMethod\"\nassert (\n- 
np.abs(dr_value.item() - dr_shrink_max_value.item()) < 1e-5\n+ np.abs(dr_value.item() - dr_os_max_value.item()) < 1e-5\n), \"DoublyRobustWithShrinkage (lambda=inf) should be almost the same as DoublyRobust\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some tests and rename variables |
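
The equivalence checks kept in this test (DRos with `lambda_=0` equals DM, DRos with a very large `lambda_` is almost DR) follow from the shrinkage weight `lambda_ * w / (w**2 + lambda_)` that appears in a later commit's `_estimate_mse_upper_bound`: it is 0 at `lambda_ = 0` and tends to `w` as `lambda_` grows. A quick numerical check of that limiting behaviour, under the assumption that this is the same weight used in the round-reward computation:

```python
import numpy as np

w = np.array([0.5, 1.0, 3.0, 10.0])  # example importance weights


def shrinkage_weight(w: np.ndarray, lambda_: float) -> np.ndarray:
    if lambda_ == 0.0:
        return np.zeros_like(w)  # no reweighted correction term -> DM
    return lambda_ * w / (w ** 2 + lambda_)


print(shrinkage_weight(w, 0.0))   # all zeros -> behaves like DM
print(shrinkage_weight(w, 1e10))  # ~= w      -> behaves like DR
assert np.allclose(shrinkage_weight(w, 1e10), w, atol=1e-5)
```
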
641,014 | 10.07.2021 10:25:24 | -32,400 | 089c6f16140998c8bffeba556fbfddcf294316dd | use dict to store mse upper bounds | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -125,7 +125,7 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the clipping hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor lambda_ in self.lambdas:\nestimated_mse_upper_bound = InverseProbabilityWeighting(\nlambda_=lambda_\n@@ -137,8 +137,10 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\naction_dist=action_dist,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_lambda_ = self.lambdas[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\n+ self.best_lambda_ = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nreturn (\nInverseProbabilityWeighting(lambda_=self.best_lambda_)\n@@ -217,7 +219,7 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the clipping hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor lambda_ in self.lambdas:\nestimated_mse_upper_bound = InverseProbabilityWeighting(\nlambda_=lambda_\n@@ -229,8 +231,10 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\naction_dist=action_dist,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_lambda_ = self.lambdas[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\n+ self.best_lambda_ = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nestimated_round_rewards = InverseProbabilityWeighting(\nlambda_=self.best_lambda_\n@@ -360,7 +364,7 @@ class DoublyRobustTuning(DoublyRobust):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the clipping hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor lambda_ in self.lambdas:\nestimated_mse_upper_bound = DoublyRobust(\nlambda_=lambda_\n@@ -373,8 +377,10 @@ class DoublyRobustTuning(DoublyRobust):\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_lambda_ = self.lambdas[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\n+ self.best_lambda_ = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nreturn (\nDoublyRobust(lambda_=self.best_lambda_)\n@@ -460,7 +466,7 @@ class DoublyRobustTuning(DoublyRobust):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the clipping hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor lambda_ in self.lambdas:\nestimated_mse_upper_bound = DoublyRobust(\nlambda_=lambda_\n@@ -473,8 +479,10 @@ class DoublyRobustTuning(DoublyRobust):\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_lambda_ = self.lambdas[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\n+ 
self.best_lambda_ = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nestimated_round_rewards = DoublyRobust(\nlambda_=self.best_lambda_\n@@ -606,7 +614,7 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the switching hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor tau_ in self.taus:\nestimated_mse_upper_bound = SwitchDoublyRobust(\ntau=tau_\n@@ -619,8 +627,10 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_tau = self.taus[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[tau_] = estimated_mse_upper_bound\n+ self.best_tau = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nreturn (\nSwitchDoublyRobust(tau=self.best_tau)\n@@ -706,7 +716,7 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the switching hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor tau_ in self.taus:\nestimated_mse_upper_bound = SwitchDoublyRobust(\ntau=tau_\n@@ -719,8 +729,10 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_tau = self.taus[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[tau_] = estimated_mse_upper_bound\n+ self.best_tau = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nestimated_round_rewards = SwitchDoublyRobust(\ntau=self.best_tau\n@@ -851,7 +863,7 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the shrinkage hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor lambda_ in self.lambdas:\nestimated_mse_upper_bound = DoublyRobustWithShrinkage(\nlambda_=lambda_\n@@ -864,8 +876,10 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmax_reward_value=self.max_reward_value,\n)\n- self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_lambda_ = self.lambdas[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\n+ self.best_lambda_ = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nreturn (\nDoublyRobustWithShrinkage(lambda_=self.best_lambda_)\n@@ -951,7 +965,7 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\nposition = np.zeros(action_dist.shape[0], dtype=int)\n# tune the shrinkage hyperparameter\n- self.estimated_mse_upper_bound_list = []\n+ self.estimated_mse_upper_bound_dict = dict()\nfor lambda_ in self.lambdas:\nestimated_mse_upper_bound = DoublyRobustWithShrinkage(\nlambda_=lambda_\n@@ -964,8 +978,10 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\nmax_reward_value=self.max_reward_value,\n)\n- 
self.estimated_mse_upper_bound_list.append(estimated_mse_upper_bound)\n- self.best_lambda_ = self.lambdas[np.argmin(self.estimated_mse_upper_bound_list)]\n+ self.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\n+ self.best_lambda_ = min(\n+ self.estimated_mse_upper_bound_dict.items(), key=lambda x: x[1]\n+ )[0]\nestimated_round_rewards = DoublyRobustWithShrinkage(\nlambda_=self.best_lambda_\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | use dict to store mse upper bounds |
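
The refactor above swaps a list indexed with `np.argmin` for a dict keyed by the candidate hyperparameter, then takes the key whose estimated MSE upper bound is smallest. The idiom in isolation, with toy values:

```python
# candidate lambda_ -> estimated MSE upper bound (toy numbers)
estimated_mse_upper_bound_dict = {1: 0.42, 10: 0.17, 100: 0.31}

# pick the key with the smallest value, as in the tuning estimators
best_lambda_ = min(estimated_mse_upper_bound_dict.items(), key=lambda x: x[1])[0]
assert best_lambda_ == 10

# equivalent, arguably clearer form
best_lambda_ = min(estimated_mse_upper_bound_dict, key=estimated_mse_upper_bound_dict.get)
```
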
641,014 | 10.07.2021 14:55:10 | -32,400 | f1a1259686c6c00b4e535862d020e3c889d9912c | apply direct bias estimation | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -568,10 +568,6 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nposition: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n- If None is given, `reward.max()` is used.\n-\nReturns\n----------\nestimated_mse_upper_bound: float\n@@ -580,9 +576,6 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nbased on Section 5 of Su et al.(2020).\n\"\"\"\n- if max_reward_value is None:\n- max_reward_value = reward.max()\n-\nn_rounds = reward.shape[0]\n# estimate the variance of IPW with clipping\nvar_hat = np.var(\n@@ -599,7 +592,7 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\n# estimate the upper bound of the bias of IPW with clipping\niw = action_dist[np.arange(n_rounds), action, position] / pscore\niw_hat = np.minimum(iw, self.lambda_)\n- bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ bias_upper_bound_arr = (iw - iw_hat) * reward\nbias_upper_bound = bias_upper_bound_arr.mean()\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n@@ -1287,7 +1280,6 @@ class DoublyRobust(BaseOffPolicyEstimator):\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n- max_reward_value: Optional[Union[int, float]] = None,\n) -> float:\n\"\"\"Estimate the upper bound of the mean-squared-error of DR with a given clipping hyperparameter.\n@@ -1311,10 +1303,6 @@ class DoublyRobust(BaseOffPolicyEstimator):\nestimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\nExpected rewards for each round, action, and position estimated by a regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n- If None is given, `reward.max()` is used.\n-\nReturns\n----------\nestimated_mse_upper_bound: float\n@@ -1323,9 +1311,6 @@ class DoublyRobust(BaseOffPolicyEstimator):\nbased on Section 5 of Su et al.(2020).\n\"\"\"\n- if max_reward_value is None:\n- max_reward_value = reward.max()\n-\nn_rounds = reward.shape[0]\n# estimate the variance of DR with clipping\nvar_hat = np.var(\n@@ -1343,7 +1328,10 @@ class DoublyRobust(BaseOffPolicyEstimator):\n# estimate the upper bound of the bias of DR with clipping\niw = action_dist[np.arange(n_rounds), action, position] / pscore\niw_hat = np.minimum(iw, self.lambda_)\n- bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ q_hat = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n+ ]\n+ bias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\nbias_upper_bound = bias_upper_bound_arr.mean()\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n@@ -1588,7 +1576,6 @@ class SwitchDoublyRobust(DoublyRobust):\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n- max_reward_value: Optional[Union[int, float]] = None,\n) -> float:\n\"\"\"Estimate the upper bound of the mean-squared-error of Switch-DR with a given hyperparameter.\n@@ -1612,10 +1599,6 @@ class SwitchDoublyRobust(DoublyRobust):\nposition: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n- If None is given, 
`reward.max()` is used.\n-\nReturns\n----------\nestimated_mse_upper_bound: float\n@@ -1624,9 +1607,6 @@ class SwitchDoublyRobust(DoublyRobust):\nbased on Section 5 of Su et al.(2020).\n\"\"\"\n- if max_reward_value is None:\n- max_reward_value = reward.max()\n-\nn_rounds = reward.shape[0]\n# estimate the variance of Switch-DR (Eq.(8) of Wang et al.(2017))\nvar_hat = np.var(\n@@ -1644,7 +1624,10 @@ class SwitchDoublyRobust(DoublyRobust):\n# estimate the upper bound of the bias of Switch-DR\niw = action_dist[np.arange(n_rounds), action, position] / pscore\niw_hat = iw * np.array(iw <= self.tau, dtype=int)\n- bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ q_hat = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n+ ]\n+ bias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\nbias_upper_bound = bias_upper_bound_arr.mean()\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n@@ -1791,7 +1774,6 @@ class DoublyRobustWithShrinkage(DoublyRobust):\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n- max_reward_value: Optional[Union[int, float]] = None,\n) -> float:\n\"\"\"Estimate the upper bound of the mean-squared-error of DR with a given hyperparameter.\n@@ -1815,10 +1797,6 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nposition: array-like, shape (n_rounds,), default=None\nPositions of each round in the given logged bandit feedback.\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n- If None is given, `reward.max()` is used.\n-\nReturns\n----------\nestimated_mse_upper_bound: float\n@@ -1827,9 +1805,6 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nbased on Section 5 of Su et al.(2020).\n\"\"\"\n- if max_reward_value is None:\n- max_reward_value = reward.max()\n-\nn_rounds = reward.shape[0]\n# estimate the variance of DRos\nvar_hat = np.var(\n@@ -1850,7 +1825,10 @@ class DoublyRobustWithShrinkage(DoublyRobust):\niw_hat = (self.lambda_ * iw) / (iw ** 2 + self.lambda_)\nelse:\niw_hat = iw\n- bias_upper_bound_arr = (iw - iw_hat) * max_reward_value\n+ q_hat = estimated_rewards_by_reg_model[\n+ np.arange(n_rounds), action, position\n+ ]\n+ bias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\nbias_upper_bound = bias_upper_bound_arr.mean()\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -28,10 +28,8 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\n----------\nlambdas: List[float]\nA list of candidate clipping hyperparameters.\n- The automatic hyperparameter tuning proposed by Wang et al.(2017) will choose the best hyperparameter value from the data.\n-\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ The automatic hyperparameter tuning proposed by Su et al.(2020)\n+ will choose the best hyperparameter value from the data.\nestimator_name: str, default='ipw'.\nName of off-policy estimator.\n@@ -47,7 +45,6 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\n\"\"\"\nlambdas: List[float] = None\n- max_reward_value: Optional[Union[int, float]] = None\nestimator_name = \"ipw\"\ndef __post_init__(self) -> None:\n@@ -66,12 +63,6 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\nraise ValueError(\"an element of lambdas must not be nan\")\nelse:\nraise TypeError(\"lambdas must be a list\")\n- if self.max_reward_value is not None:\n- check_scalar(\n- self.max_reward_value,\n- name=\"max_reward_value\",\n- target_type=(int, float),\n- )\ndef estimate_policy_value(\nself,\n@@ -135,7 +126,6 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\nposition=position,\npscore=pscore,\naction_dist=action_dist,\n- max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\nself.best_lambda_ = min(\n@@ -229,7 +219,6 @@ class InverseProbabilityWeightingTuning(InverseProbabilityWeighting):\nposition=position,\npscore=pscore,\naction_dist=action_dist,\n- max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\nself.best_lambda_ = min(\n@@ -261,10 +250,8 @@ class DoublyRobustTuning(DoublyRobust):\n----------\nlambdas: List[float]\nA list of candidate clipping hyperparameters.\n- The automatic hyperparameter tuning proposed by Wang et al.(2017) will choose the best hyperparameter value from the data.\n-\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ The automatic hyperparameter tuning proposed by Su et al.(2020)\n+ will choose the best hyperparameter value from the data.\nestimator_name: str, default='dr'.\nName of off-policy estimator.\n@@ -280,7 +267,6 @@ class DoublyRobustTuning(DoublyRobust):\n\"\"\"\nlambdas: List[float] = None\n- max_reward_value: Optional[Union[int, float]] = None\nestimator_name = \"dr\"\ndef __post_init__(self) -> None:\n@@ -299,12 +285,6 @@ class DoublyRobustTuning(DoublyRobust):\nraise ValueError(\"an element of lambdas must not be nan\")\nelse:\nraise TypeError(\"lambdas must be a list\")\n- if self.max_reward_value is not None:\n- check_scalar(\n- self.max_reward_value,\n- name=\"max_reward_value\",\n- target_type=(int, float),\n- )\ndef estimate_policy_value(\nself,\n@@ -375,7 +355,6 @@ class DoublyRobustTuning(DoublyRobust):\npscore=pscore,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\nself.best_lambda_ = min(\n@@ -477,7 +456,6 @@ class DoublyRobustTuning(DoublyRobust):\npscore=pscore,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- 
max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\nself.best_lambda_ = min(\n@@ -510,11 +488,8 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\n----------\ntaus: List[float]\nA list of candidate switching hyperparameters.\n- The automatic hyperparameter tuning proposed by Wang et al.(2017) will choose the best hyperparameter value from the data.\n-\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n- If None is given, `reward.max()` is used.\n+ The automatic hyperparameter tuning proposed by Su et al.(2020)\n+ will choose the best hyperparameter value from the data.\nestimator_name: str, default='switch-dr'.\nName of off-policy estimator.\n@@ -530,7 +505,6 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\n\"\"\"\ntaus: List[float] = None\n- max_reward_value: Optional[float] = None\nestimator_name: str = \"switch-dr\"\ndef __post_init__(self) -> None:\n@@ -549,12 +523,6 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\nraise ValueError(\"an element of taus must not be nan\")\nelse:\nraise TypeError(\"taus must be a list\")\n- if self.max_reward_value is not None:\n- check_scalar(\n- self.max_reward_value,\n- name=\"max_reward_value\",\n- target_type=(int, float),\n- )\ndef estimate_policy_value(\nself,\n@@ -625,7 +593,6 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\npscore=pscore,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[tau_] = estimated_mse_upper_bound\nself.best_tau = min(\n@@ -727,7 +694,6 @@ class SwitchDoublyRobustTuning(SwitchDoublyRobust):\npscore=pscore,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[tau_] = estimated_mse_upper_bound\nself.best_tau = min(\n@@ -760,10 +726,8 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\n----------\nlambdas: List[float]\nA list of candidate shrinkage hyperparameters.\n- The automatic hyperparameter tuning proposed by Wang et al.(2017) will choose the best hyperparameter value from the data.\n-\n- max_reward_value: int or float, default=None\n- A maximum possible reward, which is necessary for the hyperparameter tuning.\n+ The automatic hyperparameter tuning proposed by Su et al.(2020)\n+ will choose the best hyperparameter value from the data.\nestimator_name: str, default='dr-os'.\nName of off-policy estimator.\n@@ -779,7 +743,6 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\n\"\"\"\nlambdas: List[float] = None\n- max_reward_value: Optional[Union[int, float]] = None\nestimator_name = \"dr-os\"\ndef __post_init__(self) -> None:\n@@ -798,12 +761,6 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\nraise ValueError(\"an element of lambdas must not be nan\")\nelse:\nraise TypeError(\"lambdas must be a list\")\n- if self.max_reward_value is not None:\n- check_scalar(\n- self.max_reward_value,\n- name=\"max_reward_value\",\n- target_type=(int, float),\n- )\ndef estimate_policy_value(\nself,\n@@ -874,7 +831,6 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\npscore=pscore,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- 
max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\nself.best_lambda_ = min(\n@@ -976,7 +932,6 @@ class DoublyRobustWithShrinkageTuning(DoublyRobustWithShrinkage):\npscore=pscore,\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n- max_reward_value=self.max_reward_value,\n)\nself.estimated_mse_upper_bound_dict[lambda_] = estimated_mse_upper_bound\nself.best_lambda_ = min(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | apply direct bias estimation |
641,014 | 10.07.2021 15:49:44 | -32,400 | df8517000ab794f0860147b731d5b197a252d830 | add high prob deviation bound term to estimate bias upper bound | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -593,7 +593,11 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\niw = action_dist[np.arange(n_rounds), action, position] / pscore\niw_hat = np.minimum(iw, self.lambda_)\nbias_upper_bound_arr = (iw - iw_hat) * reward\n- bias_upper_bound = bias_upper_bound_arr.mean()\n+ bias_upper_bound = np.abs(bias_upper_bound_arr.mean())\n+ bias_upper_bound += np.sqrt(\n+ (2 * (iw ** 2).mean() * np.log(40)) / n_rounds\n+ ) # \\delta=0.05\n+ bias_upper_bound += (2 * iw.max() * np.log(40)) / (3 * n_rounds)\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\nreturn estimated_mse_upper_bound\n@@ -1328,11 +1332,13 @@ class DoublyRobust(BaseOffPolicyEstimator):\n# estimate the upper bound of the bias of DR with clipping\niw = action_dist[np.arange(n_rounds), action, position] / pscore\niw_hat = np.minimum(iw, self.lambda_)\n- q_hat = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), action, position\n- ]\n+ q_hat = estimated_rewards_by_reg_model[np.arange(n_rounds), action, position]\nbias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\n- bias_upper_bound = bias_upper_bound_arr.mean()\n+ bias_upper_bound = np.abs(bias_upper_bound_arr.mean())\n+ bias_upper_bound += np.sqrt(\n+ (2 * (iw ** 2).mean() * np.log(40)) / n_rounds\n+ ) # \\delta=0.05\n+ bias_upper_bound += (2 * iw.max() * np.log(40)) / (3 * n_rounds)\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\nreturn estimated_mse_upper_bound\n@@ -1624,11 +1630,13 @@ class SwitchDoublyRobust(DoublyRobust):\n# estimate the upper bound of the bias of Switch-DR\niw = action_dist[np.arange(n_rounds), action, position] / pscore\niw_hat = iw * np.array(iw <= self.tau, dtype=int)\n- q_hat = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), action, position\n- ]\n+ q_hat = estimated_rewards_by_reg_model[np.arange(n_rounds), action, position]\nbias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\n- bias_upper_bound = bias_upper_bound_arr.mean()\n+ bias_upper_bound = np.abs(bias_upper_bound_arr.mean())\n+ bias_upper_bound += np.sqrt(\n+ (2 * (iw ** 2).mean() * np.log(40)) / n_rounds\n+ ) # \\delta=0.05\n+ bias_upper_bound += (2 * iw.max() * np.log(40)) / (3 * n_rounds)\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\nreturn estimated_mse_upper_bound\n@@ -1825,11 +1833,13 @@ class DoublyRobustWithShrinkage(DoublyRobust):\niw_hat = (self.lambda_ * iw) / (iw ** 2 + self.lambda_)\nelse:\niw_hat = iw\n- q_hat = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), action, position\n- ]\n+ q_hat = estimated_rewards_by_reg_model[np.arange(n_rounds), action, position]\nbias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\n- bias_upper_bound = bias_upper_bound_arr.mean()\n+ bias_upper_bound = np.abs(bias_upper_bound_arr.mean())\n+ bias_upper_bound += np.sqrt(\n+ (2 * (iw ** 2).mean() * np.log(40)) / n_rounds\n+ ) # \\delta=0.05\n+ bias_upper_bound += (2 * iw.max() * np.log(40)) / (3 * n_rounds)\nestimated_mse_upper_bound = var_hat + (bias_upper_bound ** 2)\nreturn estimated_mse_upper_bound\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add high prob deviation bound term to estiamte bias upper bound |
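For reference, the tuning logic recorded in the two diffs above can be condensed into a small standalone sketch. The helper name and the simplified variance term below are illustrative rather than the library's exact code; `delta=0.05` reproduces the `np.log(40)` constant appearing in the hunks.

import numpy as np

def estimated_mse_score_of_clipped_ipw(
    reward, action, position, pscore, action_dist, lambda_, delta=0.05
):
    # data-driven MSE surrogate used to tune the clipping hyperparameter lambda_
    # (Su et al., 2020), reconstructed from the diffs above
    n_rounds = reward.shape[0]
    iw = action_dist[np.arange(n_rounds), action, position] / pscore
    iw_hat = np.minimum(iw, lambda_)
    # sample variance of the round-wise clipped IPW values (simplified here)
    var_hat = np.var(reward * iw_hat) / n_rounds
    # empirical bias plus a high-probability deviation bound
    bias_upper_bound = np.abs(((iw - iw_hat) * reward).mean())
    bias_upper_bound += np.sqrt(2 * (iw ** 2).mean() * np.log(2.0 / delta) / n_rounds)
    bias_upper_bound += 2 * iw.max() * np.log(2.0 / delta) / (3 * n_rounds)
    return var_hat + bias_upper_bound ** 2

A tuning class then evaluates this score for each candidate value and keeps the minimizer.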
641,006 | 11.07.2021 15:42:54 | -32,400 | 01b1fdc1fad16376cd125802e4fdd620a600d390 | rephrase policy_logit -> policy_softmax; fix typo comment | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -359,8 +359,8 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nreturn pscores\n- def _calc_pscore_given_policy_value(\n- self, all_slate_actions: np.ndarray, policy_value_i_: np.ndarray\n+ def _calc_pscore_given_policy_softmax(\n+ self, all_slate_actions: np.ndarray, policy_softmax_i_: np.ndarray\n) -> np.ndarray:\n\"\"\"Calculate the propensity score of each of the possible slate actions given policy_logit.\n@@ -369,7 +369,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nall_slate_actions: array-like, (n_action, len_list)\nAll possible slate actions.\n- policy_value_i_: array-like, (n_unique_action, )\n+ policy_softmax_i_: array-like, (n_unique_action, )\nPolicy values given context (:math:`x`), i.e., :math:`\\\\f: \\\\mathcal{X} \\\\rightarrow \\\\mathbb{R}^{\\\\mathcal{A}}`.\nReturns\n@@ -385,7 +385,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\naction_index = np.where(\nunique_action_set_2d == all_slate_actions[:, position_][:, np.newaxis]\n)[1]\n- score_ = policy_value_i_[unique_action_set_2d]\n+ score_ = policy_softmax_i_[unique_action_set_2d]\npscores *= np.divide(score_, score_.sum(axis=1, keepdims=True))[\nnp.arange(n_actions), action_index\n]\n@@ -463,7 +463,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ntarget_type=(float),\nmax_val=700.0,\n)\n- evaluation_policy_value_ = np.exp(\n+ evaluation_policy_softmax_ = np.exp(\nnp.minimum(evaluation_policy_logit_, clip_logit_value)\n)\nfor i in tqdm(\n@@ -496,9 +496,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_item_position_i_l = score_[action_index_]\nelse:\nif isinstance(clip_logit_value, float):\n- pscores = self._calc_pscore_given_policy_value(\n+ pscores = self._calc_pscore_given_policy_softmax(\nall_slate_actions=enumerated_slate_actions,\n- policy_value_i_=evaluation_policy_value_[i],\n+ policy_softmax_i_=evaluation_policy_softmax_[i],\n)\nelse:\npscores = self._calc_pscore_given_policy_logit(\n@@ -586,7 +586,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ntarget_type=(float),\nmax_val=700.0,\n)\n- behavior_policy_value_ = np.exp(\n+ behavior_policy_softmax_ = np.exp(\nnp.minimum(behavior_policy_logit_, clip_logit_value)\n)\nfor i in tqdm(\n@@ -626,9 +626,9 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\npscore_item_position_i_l = pscore_i\nelse:\nif isinstance(clip_logit_value, float):\n- pscores = self._calc_pscore_given_policy_value(\n+ pscores = self._calc_pscore_given_policy_softmax(\nall_slate_actions=enumerated_slate_actions,\n- policy_value_i_=behavior_policy_value_[i],\n+ policy_softmax_i_=behavior_policy_softmax_[i],\n)\nelse:\npscores = self._calc_pscore_given_policy_logit(\n@@ -731,7 +731,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nA boolean parameter whether `pscore_item_position` is returned or not.\nWhen `n_unique_action` and `len_list` are large, this parameter should be set to False because of the computational time.\n- clip_softmax_value: Optional[float], default=None\n+ clip_logit_value: Optional[float], default=None\nA float parameter to clip logit value.\nWhen None is given, we calculate softmax values without clipping to obtain `pscore_item_position`.\nWhen a float value is given, we clip logit values to calculate softmax values to obtain `pscore_item_position`.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | rephrase polict_logit -> policy_softmax; fix typo comment |
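The renamed helper turns per-item softmax scores into slate-level propensities. As a rough per-slate illustration (not the vectorized implementation in the diff, and assuming items are drawn position by position without replacement), the computation amounts to:

import numpy as np

def slate_pscore_from_softmax(slate_action, policy_softmax_i_):
    # illustrative only: probability of a single slate when items are chosen
    # sequentially from the remaining candidates with softmax probabilities
    pscore = 1.0
    candidates = np.arange(policy_softmax_i_.shape[0])
    for a in slate_action:
        probs = policy_softmax_i_[candidates] / policy_softmax_i_[candidates].sum()
        pscore *= probs[np.where(candidates == a)[0][0]]
        candidates = candidates[candidates != a]  # sample without replacement
    return pscore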
641,014 | 12.07.2021 18:54:57 | -32,400 | 3a04cd8eda180028191355a1af65f87720d3aec4 | rm some tests as new classes do not have to adjust to tensor inputs | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -215,13 +215,17 @@ def test_dr_tuning_init_using_valid_input_data(lambdas_taus, description):\n# prepare instances\ndm = DirectMethod()\ndr = DoublyRobust()\n-dr_tuning = DoublyRobustTuning(lambdas=[1, 100])\n+dr_tuning = DoublyRobustTuning(lambdas=[1, 100], estimator_name=\"dr_tuning\")\ndr_os_0 = DoublyRobustWithShrinkage(lambda_=0.0)\n-dr_os_tuning = DoublyRobustWithShrinkageTuning(lambdas=[1, 100])\n+dr_os_tuning = DoublyRobustWithShrinkageTuning(\n+ lambdas=[1, 100], estimator_name=\"dr_os_tuning\"\n+)\ndr_os_max = DoublyRobustWithShrinkage(lambda_=np.inf)\nsndr = SelfNormalizedDoublyRobust()\nswitch_dr_0 = SwitchDoublyRobust(tau=0.0)\n-switch_dr_tuning = SwitchDoublyRobustTuning(taus=[1, 100])\n+switch_dr_tuning = SwitchDoublyRobustTuning(\n+ taus=[1, 100], estimator_name=\"switch_dr_tuning\"\n+)\nswitch_dr_max = SwitchDoublyRobust(tau=np.inf)\ndr_estimators = [\n@@ -781,7 +785,7 @@ def test_dr_using_random_evaluation_policy(\n),\n):\n_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)\n- else:\n+ elif \"tuning\" not in estimator.estimator_name:\nestimated_policy_value = estimator.estimate_policy_value_tensor(\n**input_tensor_dict\n)\n@@ -802,7 +806,7 @@ def test_dr_using_random_evaluation_policy(\n),\n):\n_ = estimator.estimate_policy_value_tensor(**input_tensor_dict)\n- else:\n+ elif \"tuning\" not in estimator.estimator_name:\nwith pytest.raises(\nTypeError,\nmatch=re.escape(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | rm some tests as new classes do not have to adjust to tensor inputs |
641,014 | 12.07.2021 19:16:21 | -32,400 | 7acd210bbe2c0e7a76c2073b58deb14c1556e83f | fix a wrong wording | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -205,7 +205,7 @@ class ReplayMethod(BaseOffPolicyEstimator):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -496,7 +496,7 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -882,7 +882,7 @@ class DirectMethod(BaseOffPolicyEstimator):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -1227,7 +1227,7 @@ class DoublyRobust(BaseOffPolicyEstimator):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -183,7 +183,7 @@ class BaseOffPolicyEstimatorTuning:\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -345,7 +345,7 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -523,7 +523,7 @@ class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -705,7 +705,7 @@ class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n@@ -905,7 +905,7 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\nPositions of each round in the given logged bandit feedback.\nalpha: float, default=0.05\n- P-value.\n+ Significance level.\nn_bootstrap_samples: int, default=10000\nNumber of resampling performed in the bootstrap procedure.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix a wrong wording |
641,014 | 13.07.2021 12:56:51 | -32,400 | 97a569e36c0651a766814e2113bd8f508e8006dc | add estimators with hyperparam tuning | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -50,3 +50,11 @@ __all_estimators__ = [\n\"SwitchDoublyRobust\",\n\"SelfNormalizedDoublyRobust\",\n]\n+\n+\n+__all_estimators_tuning__ = [\n+ \"InverseProbabilityWeightingTuning\",\n+ \"DoublyRobustTuning\",\n+ \"SwitchDoublyRobustTuning\",\n+ \"DoublyRobustWithShrinkageTuning\",\n+]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_all_estimators.py",
"new_path": "tests/ope/test_all_estimators.py",
"diff": "@@ -13,7 +13,7 @@ from conftest import generate_action_dist\n# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\ninvalid_input_of_estimation = [\n(\n- None,\n+ None, #\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n@@ -22,7 +22,7 @@ invalid_input_of_estimation = [\n\"action_dist must be ndarray\",\n),\n(\n- generate_action_dist(5, 4, 1)[:, :, 0],\n+ generate_action_dist(5, 4, 1)[:, :, 0], #\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n@@ -31,7 +31,7 @@ invalid_input_of_estimation = [\n\"action_dist.ndim must be 3-dimensional\",\n),\n(\n- np.ones((5, 4, 3)),\n+ np.ones((5, 4, 3)), #\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n@@ -44,7 +44,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- \"4\",\n+ \"4\", #\nnp.zeros((5, 4, 3)),\n\"position must be ndarray\",\n),\n@@ -53,7 +53,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- np.zeros((5, 4), dtype=int),\n+ np.zeros((5, 4), dtype=int), #\nnp.zeros((5, 4, 3)),\n\"position must be 1-dimensional\",\n),\n@@ -62,7 +62,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- np.zeros(5),\n+ np.zeros(5), #\nnp.zeros((5, 4, 3)),\n\"position elements must be non-negative integers\",\n),\n@@ -71,7 +71,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- np.zeros(5, dtype=int) - 1,\n+ np.zeros(5, dtype=int) - 1, #\nnp.zeros((5, 4, 3)),\n\"position elements must be non-negative integers\",\n),\n@@ -80,7 +80,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- np.zeros(4, dtype=int),\n+ np.zeros(4, dtype=int), #\nnp.zeros((5, 4, 3)),\n\"the first dimension of position and the first dimension of action_dist must be the same.\",\n),\n@@ -89,7 +89,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- np.ones(5, dtype=int) * 8,\n+ np.ones(5, dtype=int) * 8, #\nnp.zeros((5, 4, 3)),\n\"position elements must be smaller than the third dimension of action_dist\",\n),\n@@ -98,7 +98,7 @@ invalid_input_of_estimation = [\nnp.zeros(5, dtype=int),\nnp.zeros(5, dtype=int),\nnp.ones(5),\n- None,\n+ None, #\nnp.zeros((5, 4, 3)),\n\"position elements must be given when the third dimension of action_dist is greater than 1\",\n),\n@@ -152,6 +152,11 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n]\n+ all_estimators_tuning = ope.__all_estimators_tuning__\n+ estimators_tuning = [\n+ getattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\n+ for estimator_name in all_estimators_tuning\n+ ]\n# estimate_intervals function raises ValueError of all estimators\nfor estimator in estimators:\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n@@ -174,6 +179,39 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\n+ for estimator_tuning in estimators_tuning:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ est = estimator_tuning.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ assert est == 0.0, 
f\"policy value must be 0, but {est}\"\n+ assert hasattr(\n+ estimator_tuning, \"best_hyperparam\"\n+ ), \"estimator_tuning should have `best_hyperparam` attr\"\n+ assert hasattr(\n+ estimator_tuning, \"estimated_mse_score_dict\"\n+ ), \"estimator_tuning should have `estimated_mse_score_dict` attr\"\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = estimator_tuning.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ assert hasattr(\n+ estimator_tuning, \"best_hyperparam\"\n+ ), \"estimator_tuning should have `best_hyperparam` attr\"\n+ assert hasattr(\n+ estimator_tuning, \"estimated_mse_score_dict\"\n+ ), \"estimator_tuning should have `estimated_mse_score_dict` attr\"\n+\[email protected](\n\"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\",\n@@ -192,6 +230,11 @@ def test_estimation_of_all_estimators_using_valid_input_data(\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n]\n+ all_estimators_tuning = ope.__all_estimators_tuning__\n+ estimators_tuning = [\n+ getattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\n+ for estimator_name in all_estimators_tuning\n+ ]\n# estimate_intervals function raises ValueError of all estimators\nfor estimator in estimators:\n_ = estimator.estimate_policy_value(\n@@ -210,12 +253,29 @@ def test_estimation_of_all_estimators_using_valid_input_data(\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n)\n+ for estimator_tuning in estimators_tuning:\n+ _ = estimator_tuning.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n+ _ = estimator_tuning.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ )\n# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\ninvalid_input_of_estimation_tensor = [\n(\n- None,\n+ None, #\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n@@ -224,7 +284,7 @@ invalid_input_of_estimation_tensor = [\n\"action_dist must be Tensor\",\n),\n(\n- torch.Tensor(generate_action_dist(5, 4, 1)[:, :, 0]),\n+ torch.Tensor(generate_action_dist(5, 4, 1)[:, :, 0]), #\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n@@ -233,7 +293,7 @@ invalid_input_of_estimation_tensor = [\n\"action_dist.ndim must be 3-dimensional\",\n),\n(\n- torch.from_numpy(np.ones((5, 4, 3))),\n+ torch.from_numpy(np.ones((5, 4, 3))), #\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n@@ -246,7 +306,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- \"4\",\n+ \"4\", #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"position must be Tensor\",\n),\n@@ -255,7 +315,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- torch.from_numpy(np.zeros((5, 4), 
dtype=int)),\n+ torch.from_numpy(np.zeros((5, 4), dtype=int)), #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"position must be 1-dimensional\",\n),\n@@ -264,7 +324,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- torch.from_numpy(np.zeros(5)),\n+ torch.from_numpy(np.zeros(5)), #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"position elements must be non-negative integers\",\n),\n@@ -273,7 +333,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- torch.from_numpy(np.zeros(5, dtype=int) - 1),\n+ torch.from_numpy(np.zeros(5, dtype=int) - 1), #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"position elements must be non-negative integers\",\n),\n@@ -282,7 +342,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- torch.from_numpy(np.zeros(4, dtype=int)),\n+ torch.from_numpy(np.zeros(4, dtype=int)), #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"the first dimension of position and the first dimension of action_dist must be the same.\",\n),\n@@ -291,7 +351,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- torch.from_numpy(np.ones(5, dtype=int) * 8),\n+ torch.from_numpy(np.ones(5, dtype=int) * 8), #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"position elements must be smaller than the third dimension of action_dist\",\n),\n@@ -300,7 +360,7 @@ invalid_input_of_estimation_tensor = [\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.zeros(5, dtype=int)),\ntorch.from_numpy(np.ones(5)),\n- None,\n+ None, #\ntorch.from_numpy(np.zeros((5, 4, 3))),\n\"position elements must be given when the third dimension of action_dist is greater than 1\",\n),\n@@ -458,6 +518,11 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n]\n+ all_estimators_tuning = ope.__all_estimators_tuning__\n+ estimators_tuning = [\n+ getattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\n+ for estimator_name in all_estimators_tuning\n+ ]\n# estimate_intervals function raises ValueError of all estimators\nfor estimator in estimators:\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n@@ -472,6 +537,19 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n+ for estimator_tuning in estimators_tuning:\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = estimator_tuning.estimate_interval(\n+ reward=bandit_feedback[\"reward\"],\n+ action=bandit_feedback[\"action\"],\n+ position=bandit_feedback[\"position\"],\n+ pscore=bandit_feedback[\"pscore\"],\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\[email protected](\n@@ -497,6 +575,11 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nestimators = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n]\n+ all_estimators_tuning = ope.__all_estimators_tuning__\n+ estimators_tuning = [\n+ getattr(ope.estimators_tuning, 
estimator_name)([1, 100, 10000, np.inf])\n+ for estimator_name in all_estimators_tuning\n+ ]\n# estimate_intervals function raises ValueError of all estimators\nfor estimator in estimators:\n_ = estimator.estimate_interval(\n@@ -510,6 +593,18 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n+ for estimator_tuning in estimators_tuning:\n+ _ = estimator_tuning.estimate_interval(\n+ reward=bandit_feedback[\"reward\"],\n+ action=bandit_feedback[\"action\"],\n+ position=bandit_feedback[\"position\"],\n+ pscore=bandit_feedback[\"pscore\"],\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\ndef test_fixture(\n@@ -544,11 +639,17 @@ def test_performance_of_ope_estimators_using_random_evaluation_policy(\ngt_std = q_pi_e.std(ddof=1)\n# test most of the estimators (ReplayMethod is not tested because it is out of scope)\nall_estimators = ope.__all_estimators__\n- estimators = [\n+ estimators_standard = [\ngetattr(ope.estimators, estimator_name)()\nfor estimator_name in all_estimators\nif estimator_name not in [\"ReplayMethod\"]\n]\n+ all_estimators_tuning = ope.__all_estimators_tuning__\n+ estimators_tuning = [\n+ getattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\n+ for estimator_name in all_estimators_tuning\n+ ]\n+ estimators = estimators_standard + estimators_tuning\n# conduct OPE\nope_instance = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=estimators\n@@ -579,9 +680,15 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\naction_dist = random_action_dist\n# test all estimators\nall_estimators = ope.__all_estimators__\n- estimators = [\n+ estimators_standard = [\ngetattr(ope.estimators, estimator_name)() for estimator_name in all_estimators\n]\n+ all_estimators_tuning = ope.__all_estimators_tuning__\n+ estimators_tuning = [\n+ getattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\n+ for estimator_name in all_estimators_tuning\n+ ]\n+ estimators = estimators_standard + estimators_tuning\n# conduct OPE\nope_instance = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=estimators\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add estimators with hyperparam tuning |
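The tests above drive the new tuning classes through OffPolicyEvaluation. A minimal usage sketch, assuming the classes are exported from obp.ope and that bandit_feedback, action_dist, and estimated_rewards_by_reg_model have already been prepared:

import numpy as np
from obp.ope import (
    OffPolicyEvaluation,
    InverseProbabilityWeightingTuning,
    DoublyRobustTuning,
    SwitchDoublyRobustTuning,
    DoublyRobustWithShrinkageTuning,
)

# candidate hyperparameters mirror those used in the tests above
tuned_estimators = [
    InverseProbabilityWeightingTuning(lambdas=[1, 100, 10000, np.inf]),
    DoublyRobustTuning(lambdas=[1, 100], estimator_name="dr_tuning"),
    SwitchDoublyRobustTuning(taus=[1, 100], estimator_name="switch_dr_tuning"),
    DoublyRobustWithShrinkageTuning(lambdas=[1, 100], estimator_name="dr_os_tuning"),
]
ope = OffPolicyEvaluation(
    bandit_feedback=bandit_feedback,  # assumed to be prepared beforehand
    ope_estimators=tuned_estimators,
)
estimated_policy_values = ope.estimate_policy_values(
    action_dist=action_dist,  # evaluation policy's action distribution
    estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)

After estimation, each tuning estimator exposes best_hyperparam and estimated_mse_score_dict, which the tests assert on.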
641,014 | 18.07.2021 14:41:23 | -32,400 | c6284f176fdd58021ad0e1dbe563188d0eddbfb5 | add subsample option to OpenBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/real.py",
"new_path": "obp/dataset/real.py",
"diff": "from dataclasses import dataclass\nfrom logging import getLogger, basicConfig, INFO\nfrom pathlib import Path\n-from typing import Optional\n-from typing import Union\n-from typing import Tuple\n+from typing import Optional, Union, Tuple\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import rankdata\nfrom sklearn.preprocessing import LabelEncoder\n-from sklearn.utils import check_random_state\n+from sklearn.utils import check_random_state, check_scalar\nfrom .base import BaseRealBanditDataset\nfrom ..types import BanditFeedback\n@@ -37,11 +35,12 @@ class OpenBanditDataset(BaseRealBanditDataset):\nMust be either 'random' or 'bts'.\ncampaign: str\n- One of the three possible campaigns considered in ZOZOTOWN, \"all\", \"men\", and \"women\".\n+ One of the three possible campaigns considered in ZOZOTOWN.\n+ Must be one of \"all\", \"men\", or \"women\".\n- data_path: Path, default=None\n- Path where the Open Bandit Dataset exists.\n- When `None` is given, this class downloads the example small-sized version of the dataset.\n+ data_path: str or Path, default=None\n+ Path where the Open Bandit Dataset is stored.\n+ When `None` is given, this class downloads the example small-sized data.\ndataset_name: str, default='obd'\nName of the dataset.\n@@ -55,7 +54,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\nbehavior_policy: str\ncampaign: str\n- data_path: Optional[Path] = None\n+ data_path: Optional[Union[str, Path]] = None\ndataset_name: str = \"obd\"\ndef __post_init__(self) -> None:\n@@ -74,18 +73,25 @@ class OpenBanditDataset(BaseRealBanditDataset):\n\"women\",\n]:\nraise ValueError(\n- f\"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given\"\n+ f\"campaign must be one of 'all', 'men', or 'women', but {self.campaign} is given\"\n)\nif self.data_path is None:\nlogger.info(\n\"When `data_path` is not given, this class downloads the example small-sized version of the Open Bandit Dataset.\"\n)\n- self.data_path = Path(__file__).parent / \"obd\"\n+ self.data_path = (\n+ Path(__file__).parent / \"obd\" / self.behavior_policy / self.campaign\n+ )\nelse:\n- if not isinstance(self.data_path, Path):\n- raise ValueError(\"data_path must be a Path type\")\n+ if isinstance(self.data_path, Path):\nself.data_path = self.data_path / self.behavior_policy / self.campaign\n+ elif isinstance(self.data_path, str):\n+ self.data_path = Path(\n+ f\"{self.data_path}/{self.behavior_policy}/{self.campaign}\"\n+ )\n+ else:\n+ raise ValueError(\"data_path must be a string or Path\")\nself.raw_data_file = f\"{self.campaign}.csv\"\nself.load_raw_data()\n@@ -136,7 +142,9 @@ class OpenBanditDataset(BaseRealBanditDataset):\nWhen `None` is given, this class downloads the example small-sized version of the dataset.\ntest_size: float, default=0.3\n- If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n+ Proportion of the dataset included in the test split.\n+ If float, should be between 0.0 and 1.0.\n+ This argument matters only when `is_timeseries_split=True` (the out-sample case).\nis_timeseries_split: bool, default=False\nIf true, split the original logged bandit feedback data by time series.\n@@ -202,12 +210,12 @@ class OpenBanditDataset(BaseRealBanditDataset):\nParameters\n-----------\ntest_size: float, default=0.3\n- If float, should be between 0.0 and 1.0 and represent the proportion of\n- the dataset to include in the evaluation split.\n+ Proportion of the dataset included in the test split.\n+ If float, 
should be between 0.0 and 1.0.\nThis argument matters only when `is_timeseries_split=True` (the out-sample case).\nis_timeseries_split: bool, default=False\n- If true, split the original logged bandit feedback data by time series.\n+ If true, split the original logged bandit feedback data into train and test sets based on time series.\nReturns\n--------\n@@ -224,10 +232,18 @@ class OpenBanditDataset(BaseRealBanditDataset):\n- action_context: item-related context vectors\n\"\"\"\n+ if not isinstance(is_timeseries_split, bool):\n+ raise TypeError(\n+ f\"`is_timeseries_split` must be a bool, but {type(is_timeseries_split)} is given\"\n+ )\n+\nif is_timeseries_split:\n- if not isinstance(test_size, float) or (test_size <= 0 or test_size >= 1):\n- raise ValueError(\n- f\"test_size must be a float in the (0,1) interval, but {test_size} is given\"\n+ check_scalar(\n+ test_size,\n+ name=\"target_size\",\n+ target_type=(float),\n+ min_val=0.0,\n+ max_val=1.0,\n)\nn_rounds_train = np.int(self.n_rounds * (1.0 - test_size))\nbandit_feedback_train = dict(\n@@ -265,6 +281,7 @@ class OpenBanditDataset(BaseRealBanditDataset):\ndef sample_bootstrap_bandit_feedback(\nself,\n+ sample_size: Optional[int] = None,\ntest_size: float = 0.3,\nis_timeseries_split: bool = False,\nrandom_state: Optional[int] = None,\n@@ -273,13 +290,18 @@ class OpenBanditDataset(BaseRealBanditDataset):\nParameters\n-----------\n+ sample_size: int, default=None\n+ Number of data sampled by bootstrap.\n+ When None is given, the original data size (n_rounds) is used as `sample_size`.\n+ The value must be smaller than the original data size.\n+\ntest_size: float, default=0.3\n- If float, should be between 0.0 and 1.0 and represent the proportion of\n- the dataset to include in the evaluation split.\n+ Proportion of the dataset included in the test split.\n+ If float, should be between 0.0 and 1.0.\nThis argument matters only when `is_timeseries_split=True` (the out-sample case).\nis_timeseries_split: bool, default=False\n- If true, split the original logged bandit feedback data by time series.\n+ If true, split the original logged bandit feedback data into train and test sets based on time series.\nrandom_state: int, default=None\nControls the random seed in bootstrap sampling.\n@@ -299,6 +321,13 @@ class OpenBanditDataset(BaseRealBanditDataset):\n- action_context: item-related context vectors\n\"\"\"\n+ if sample_size:\n+ check_scalar(\n+ sample_size,\n+ name=\"sample_size\",\n+ target_type=(int),\n+ min_val=0,\n+ )\nif is_timeseries_split:\nbandit_feedback = self.obtain_batch_bandit_feedback(\ntest_size=test_size, is_timeseries_split=is_timeseries_split\n@@ -308,8 +337,18 @@ class OpenBanditDataset(BaseRealBanditDataset):\ntest_size=test_size, is_timeseries_split=is_timeseries_split\n)\nn_rounds = bandit_feedback[\"n_rounds\"]\n+ if sample_size is None:\n+ sample_size = bandit_feedback[\"n_rounds\"]\n+ else:\n+ if sample_size > n_rounds:\n+ raise ValueError(\n+ \"`sample_size` must be smaller than the original data size (`n_rounds`).\"\n+ )\nrandom_ = check_random_state(random_state)\n- bootstrap_idx = random_.choice(np.arange(n_rounds), size=n_rounds, replace=True)\n+ bootstrap_idx = random_.choice(\n+ np.arange(n_rounds), size=sample_size, replace=True\n+ )\nfor key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\nbandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]\n+ bandit_feedback[\"n_rounds\"] = sample_size\nreturn bandit_feedback\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add subsample option to OpenBanditDataset |
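A short usage sketch of the new sample_size argument; when data_path is None the class downloads the small example data, so this runs on the bundled sample:

from obp.dataset import OpenBanditDataset

dataset = OpenBanditDataset(behavior_policy="random", campaign="all")
# draw a bootstrap sample of 1,000 rounds instead of the full data size
bootstrap_bf = dataset.sample_bootstrap_bandit_feedback(
    sample_size=1000, random_state=12345
)
assert bootstrap_bf["n_rounds"] == 1000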
641,014 | 18.07.2021 14:41:41 | -32,400 | 806c639b4c2416d75b3a73b157f92c82b230e7e9 | add some tests of OpenBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_real.py",
"new_path": "tests/dataset/test_real.py",
"diff": "@@ -19,9 +19,7 @@ def test_real_init():\n# data_path\nwith pytest.raises(ValueError):\n- OpenBanditDataset(\n- behavior_policy=\"random\", campaign=\"all\", data_path=\"raw_str_path\"\n- )\n+ OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\", data_path=5)\n# load_raw_data\nobd = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n@@ -51,6 +49,14 @@ def test_obtain_batch_bandit_feedback():\ndataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\ndataset.obtain_batch_bandit_feedback(is_timeseries_split=True, test_size=-0.5)\n+ with pytest.raises(TypeError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.obtain_batch_bandit_feedback(is_timeseries_split=True, test_size=\"0.5\")\n+\n+ with pytest.raises(TypeError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.obtain_batch_bandit_feedback(is_timeseries_split=\"True\", test_size=0.5)\n+\n# existence of keys\n# is_timeseries_split=False (default)\ndataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n@@ -95,6 +101,30 @@ def test_calc_on_policy_policy_value_estimate():\ndef test_sample_bootstrap_bandit_feedback():\n+ with pytest.raises(ValueError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.sample_bootstrap_bandit_feedback(\n+ is_timeseries_split=True, test_size=1.3\n+ )\n+\n+ with pytest.raises(ValueError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.sample_bootstrap_bandit_feedback(\n+ is_timeseries_split=True, test_size=-0.5\n+ )\n+\n+ with pytest.raises(ValueError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.sample_bootstrap_bandit_feedback(sample_size=-50)\n+\n+ with pytest.raises(TypeError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.sample_bootstrap_bandit_feedback(sample_size=50.0)\n+\n+ with pytest.raises(ValueError):\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ dataset.sample_bootstrap_bandit_feedback(sample_size=10000000)\n+\ndataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\nbandit_feedback = dataset.obtain_batch_bandit_feedback()\nbootstrap_bf = dataset.sample_bootstrap_bandit_feedback()\n@@ -111,3 +141,10 @@ def test_sample_bootstrap_bandit_feedback():\n)\nfor k in bf_keys:\nassert len(bandit_feedback_timeseries[k]) == len(bootstrap_bf_timeseries[k])\n+\n+ sample_size = 1000\n+ dataset = OpenBanditDataset(behavior_policy=\"random\", campaign=\"all\")\n+ bootstrap_bf = dataset.sample_bootstrap_bandit_feedback(sample_size=sample_size)\n+ assert bootstrap_bf[\"n_rounds\"] == sample_size\n+ for k in bf_keys:\n+ assert len(bootstrap_bf[k]) == sample_size\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some tests of OpenBanditDataset |
641,014 | 28.08.2021 12:17:40 | 14,400 | 962a92f9a231065b1c73cf8573f36771babb541d | add some arguments | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -7,11 +7,11 @@ from typing import Optional, Callable\nimport numpy as np\nfrom scipy.stats import truncnorm\n-from sklearn.utils import check_random_state\n+from sklearn.utils import check_random_state, check_scalar\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n-from ..utils import sigmoid, softmax\n+from ..utils import sigmoid, softmax, sample_action_fast\nfrom .reward_type import RewardType\n@@ -47,11 +47,21 @@ class SyntheticBanditDataset(BaseBanditDataset):\nIf None is set, context **independent** expected reward for each action will be\nsampled from the uniform distribution automatically.\n+ reward_std: float, default=1.0\n+ Standard deviation of the reward distribution.\n+ A larger value leads to a noisy reward distribution.\n+ This argument is valid only when `reward_type=\"continuous\"`.\n+\nbehavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\nFunction generating probability distribution over action space,\ni.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\nIf None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\n+ tau: float, default=1.0\n+ A temperature hyperparameer which controls the behavior policy.\n+ A large value leads to a near-uniform behavior policy,\n+ while a small value leads to a near-deterministic behavior policy.\n+\nrandom_state: int, default=12345\nControls the random seed in sampling synthetic bandit dataset.\n@@ -126,9 +136,11 @@ class SyntheticBanditDataset(BaseBanditDataset):\ndim_context: int = 1\nreward_type: str = RewardType.BINARY.value\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\n+ reward_std: float = 1.0\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n] = None\n+ tau: float = 1.0\nrandom_state: int = 12345\ndataset_name: str = \"synthetic_bandit_dataset\"\n@@ -149,6 +161,8 @@ class SyntheticBanditDataset(BaseBanditDataset):\nraise ValueError(\nf\"reward_type must be either '{RewardType.BINARY.value}' or '{RewardType.CONTINUOUS.value}', but {self.reward_type} is given.'\"\n)\n+ check_scalar(self.reward_std, \"reward_std\", (int, float), min_val=0)\n+ check_scalar(self.tau, \"tau\", (int, float), min_val=0)\nif self.random_state is None:\nraise ValueError(\"random_state must be given\")\nself.random_ = check_random_state(self.random_state)\n@@ -159,7 +173,6 @@ class SyntheticBanditDataset(BaseBanditDataset):\nif RewardType(self.reward_type) == RewardType.CONTINUOUS:\nself.reward_min = 0\nself.reward_max = 1e10\n- self.reward_std = 1.0\n# one-hot encoding representations characterizing each action\nself.action_context = np.eye(self.n_actions, dtype=int)\n@@ -269,6 +282,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\n# sample actions for each round based on the behavior policy\nif self.behavior_policy_function is None:\nbehavior_policy_ = np.tile(self.behavior_policy, (n_rounds, 1))\n+ behavior_policy_ = softmax(behavior_policy_ / self.tau)\naction = self.random_.choice(\nnp.arange(self.n_actions), p=self.behavior_policy, size=n_rounds\n)\n@@ -278,19 +292,14 @@ class SyntheticBanditDataset(BaseBanditDataset):\naction_context=self.action_context,\nrandom_state=self.random_state,\n)\n- action = np.array(\n- [\n- self.random_.choice(\n- np.arange(self.n_actions),\n- p=behavior_policy_[i],\n- )\n- for i in np.arange(n_rounds)\n- ]\n+ behavior_policy_ = softmax(behavior_policy_ / self.tau)\n+ action = sample_action_fast(\n+ 
behavior_policy_, random_state=self.random_state\n)\npscore = behavior_policy_[np.arange(n_rounds), action]\n+ # sample reward based on the context and action\nexpected_reward_ = self.calc_expected_reward(context)\n- reward = self.sample_reward_given_expected_reward(expected_reward_, action)\nif RewardType(self.reward_type) == RewardType.CONTINUOUS:\n# correct expected_reward_, as we use truncated normal distribution here\nmean = expected_reward_\n@@ -299,6 +308,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nexpected_reward_ = truncnorm.stats(\na=a, b=b, loc=mean, scale=self.reward_std, moments=\"m\"\n)\n+ reward = self.sample_reward_given_expected_reward(expected_reward_, action)\nreturn dict(\nn_rounds=n_rounds,\n@@ -471,4 +481,4 @@ def linear_behavior_policy(\nfor d in np.arange(action_context.shape[0]):\nlogits[:, d] = context @ coef_ + action_context[d] @ action_coef_\n- return softmax(logits)\n+ return logits\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some arguments |
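A sketch of the new reward_std and tau arguments; the particular values are arbitrary and only illustrate that a small tau sharpens the softmax behavior policy while reward_std controls reward noise (this assumes linear_behavior_policy is exported from obp.dataset):

from obp.dataset import SyntheticBanditDataset, linear_behavior_policy

dataset = SyntheticBanditDataset(
    n_actions=10,
    dim_context=5,
    reward_type="continuous",
    reward_std=2.0,  # noisier continuous rewards
    behavior_policy_function=linear_behavior_policy,  # now returns logits
    tau=0.1,  # small tau -> near-deterministic behavior policy
    random_state=12345,
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)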
641,014 | 28.08.2021 12:17:55 | 14,400 | 501b0c9a818cf43af4f30ecebea759e401c49e8e | add sample_action_fast func | [
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -93,6 +93,33 @@ def estimate_confidence_interval_by_bootstrap(\n}\n+def sample_action_fast(\n+ action_dist: np.ndarray, random_state: Optional[int] = None\n+) -> np.ndarray:\n+ \"\"\"Sample actions faster based on a given action distribution.\n+\n+ Parameters\n+ ----------\n+ action_dist: array-like, shape (n_rounds, n_actions)\n+ Distribution over actions.\n+\n+ random_state: Optional[int], default=None\n+ Controls the random seed in sampling synthetic bandit dataset.\n+\n+ Returns\n+ ---------\n+ sampled_action: array-like, shape (n_rounds,)\n+ Actions sampled based on `action_dist`.\n+\n+ \"\"\"\n+ random_ = check_random_state(random_state)\n+ uniform_rvs = random_.uniform(size=action_dist.shape[0])[:, np.newaxis]\n+ cum_action_dist = action_dist.cumsum(axis=1)\n+ flg = cum_action_dist > uniform_rvs\n+ sampled_action = flg.argmax(axis=1)\n+ return sampled_action\n+\n+\ndef convert_to_action_dist(\nn_actions: int,\nselected_actions: np.ndarray,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add sample_action_fact func |
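A quick usage check of the new helper: each row of action_dist is a probability vector, and one action per row is drawn via the cumulative-sum trick implemented above.

import numpy as np
from obp.utils import sample_action_fast, softmax

action_dist = softmax(np.random.normal(size=(5, 3)))  # 5 rounds, 3 actions
sampled_action = sample_action_fast(action_dist, random_state=12345)
print(sampled_action.shape)  # (5,)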
641,014 | 28.08.2021 12:18:11 | 14,400 | d15e992a5cca7ab8ca1a45a3294c4232ab4c7700 | add some tests about SyntheticBanditDataset | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -28,6 +28,20 @@ def test_synthetic_init():\nwith pytest.raises(ValueError):\nSyntheticBanditDataset(n_actions=2, reward_type=\"aaa\")\n+ # reward_std\n+ with pytest.raises(TypeError):\n+ SyntheticBanditDataset(n_actions=2, reward_std=\"aaa\")\n+\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=2, reward_std=-1)\n+\n+ # tau\n+ with pytest.raises(TypeError):\n+ SyntheticBanditDataset(n_actions=2, tau=\"aaa\")\n+\n+ with pytest.raises(ValueError):\n+ SyntheticBanditDataset(n_actions=2, tau=-1)\n+\n# random_state\nwith pytest.raises(ValueError):\nSyntheticBanditDataset(n_actions=2, random_state=None)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/test_utils.py",
"diff": "+import numpy as np\n+\n+from obp.utils import sample_action_fast, softmax\n+\n+\n+def test_sample_action_fast():\n+ n_rounds = 10\n+ n_actions = 5\n+ n_sim = 100000\n+\n+ true_probs = softmax(np.random.normal(size=(n_rounds, n_actions)))\n+ sampled_action_list = list()\n+ for _ in np.arange(n_sim):\n+ sampled_action_list.append(sample_action_fast(true_probs)[:, np.newaxis])\n+\n+ sampled_action_arr = np.concatenate(sampled_action_list, 1)\n+ for i in np.arange(n_rounds):\n+ sampled_action_counts = np.unique(sampled_action_arr[i], return_counts=True)[1]\n+ empirical_probs = sampled_action_counts / n_sim\n+ assert np.isclose(true_probs[i], empirical_probs, rtol=5e-2, atol=1e-3).all()\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some tests about SyntheticBanditDataset |
641,014 | 28.08.2021 12:53:01 | 14,400 | d4d4684e06c5fed117b383a030e98bfd9f9c75bc | fix a bug and typo in tests | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -465,7 +465,7 @@ def linear_behavior_policy(\nReturns\n---------\nbehavior_policy: array-like, shape (n_rounds, n_actions)\n- Action choice probabilities given context (:math:`x`), i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+ Logit values given context (:math:`x`), i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n\"\"\"\nif not isinstance(context, np.ndarray) or context.ndim != 2:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -7,6 +7,7 @@ from obp.dataset.synthetic import (\nlinear_reward_function,\nlinear_behavior_policy,\n)\n+from obp.utils import softmax\ndef test_synthetic_init():\n@@ -324,6 +325,8 @@ def test_synthetic_linear_behavior_policy():\nn_actions = 5\ncontext = np.ones([n_rounds, dim_context])\naction_context = np.ones([n_actions, dim_action_context])\n- action_prob = linear_behavior_policy(context=context, action_context=action_context)\n+ action_prob = softmax(\n+ linear_behavior_policy(context=context, action_context=action_context)\n+ )\nassert action_prob.shape[0] == n_rounds and action_prob.shape[1] == n_actions\nassert np.all(0 <= action_prob) and np.all(action_prob <= 1)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_regression_models.py",
"new_path": "tests/ope/test_regression_models.py",
"diff": "@@ -825,8 +825,8 @@ def test_performance_of_binary_outcome_models(\nauc_scores: Dict[str, float] = {}\n# check ground truth\nprint(f\"gt_mean: {gt_mean}\")\n- # check the performance of regression models using doubly robust criteria (|\\hat{q} - q| <= |q| is satisfied with a high probability)\n- dr_criteria_pass_rate = 0.8\n+ # check the performance of regression models using doubly robust criterion (|\\hat{q} - q| <= |q| is satisfied with a high probability)\n+ dr_criterion_pass_rate = 0.7\nfit_methods = [\"normal\", \"iw\", \"mrdr\"]\nfor fit_method in fit_methods:\nfor model_name, model in binary_model_dict.items():\n@@ -864,16 +864,16 @@ def test_performance_of_binary_outcome_models(\nnp.zeros_like(bandit_feedback[\"action\"], dtype=int),\n],\n)\n- # compare dr criteria\n- dr_criteria = np.abs((gt_mean - estimated_rewards_by_reg_model)) - np.abs(\n+ # compare dr criterion\n+ dr_criterion = np.abs((gt_mean - estimated_rewards_by_reg_model)) - np.abs(\ngt_mean\n)\nprint(\n- f\"Dr criteria is satisfied with probability {np.mean(dr_criteria <= 0)} ------ model: {model_name} ({fit_method}),\"\n+ f\"Dr criterion is satisfied with probability {np.mean(dr_criterion <= 0)} ------ model: {model_name} ({fit_method}),\"\n)\nassert (\n- np.mean(dr_criteria <= 0) >= dr_criteria_pass_rate\n- ), f\" should be satisfied with a probability at least {dr_criteria_pass_rate}\"\n+ np.mean(dr_criterion <= 0) >= dr_criterion_pass_rate\n+ ), f\" should be satisfied with a probability at least {dr_criterion_pass_rate}\"\nfor model_name in auc_scores:\nprint(f\"AUC of {model_name} is {auc_scores[model_name]}\")\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix a bug and typo in tests |
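The quantity checked in the updated regression-model test can be read as a per-round doubly robust criterion: the estimate q_hat should be closer to the true expected reward than zero is. A tiny helper (the name is mine) mirroring the test's computation; the test requires the resulting rate to be at least 0.7:

import numpy as np

def dr_criterion_pass_rate(gt_mean, estimated_rewards_by_reg_model):
    # fraction of rounds where |q_hat - q| <= |q| holds
    dr_criterion = np.abs(gt_mean - estimated_rewards_by_reg_model) - np.abs(gt_mean)
    return float(np.mean(dr_criterion <= 0))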
641,014 | 30.08.2021 07:28:19 | 14,400 | 020a78ee3e90c9cc55896220f2d86d62dcbdd883 | fix Error of meta | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "@@ -12,7 +12,7 @@ import numpy as np\nfrom pandas import DataFrame\nimport seaborn as sns\n-from .estimators import BaseOffPolicyEstimator\n+from .estimators import BaseOffPolicyEstimator, DirectMethod as DM, DoublyRobust as DR\nfrom ..types import BanditFeedback\nfrom ..utils import check_confidence_interval_arguments\n@@ -84,8 +84,11 @@ class OffPolicyEvaluation:\nif key_ not in self.bandit_feedback:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\nself.ope_estimators_ = dict()\n+ self.is_model_dependent = False\nfor estimator in self.ope_estimators:\nself.ope_estimators_[estimator.estimator_name] = estimator\n+ if isinstance(estimator, DM) or isinstance(estimator, DR):\n+ self.is_model_dependent = True\ndef _create_estimator_inputs(\nself,\n@@ -102,9 +105,7 @@ class OffPolicyEvaluation:\nf\"action_dist.ndim must be 3-dimensional, but is {action_dist.ndim}\"\n)\nif estimated_rewards_by_reg_model is None:\n- logger.warning(\n- \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n- )\n+ pass\nelif isinstance(estimated_rewards_by_reg_model, dict):\nfor estimator_name, value in estimated_rewards_by_reg_model.items():\nif not isinstance(value, np.ndarray):\n@@ -171,6 +172,12 @@ class OffPolicyEvaluation:\nDictionary containing estimated policy values by OPE estimators.\n\"\"\"\n+ if self.is_model_dependent:\n+ if estimated_rewards_by_reg_model is None:\n+ raise ValueError(\n+ \"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given\"\n+ )\n+\npolicy_value_dict = dict()\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\n@@ -222,6 +229,12 @@ class OffPolicyEvaluation:\nusing nonparametric bootstrap procedure.\n\"\"\"\n+ if self.is_model_dependent:\n+ if estimated_rewards_by_reg_model is None:\n+ raise ValueError(\n+ \"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given\"\n+ )\n+\ncheck_confidence_interval_arguments(\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta_continuous.py",
"new_path": "obp/ope/meta_continuous.py",
"diff": "@@ -12,7 +12,10 @@ import numpy as np\nfrom pandas import DataFrame\nimport seaborn as sns\n-from .estimators_continuous import BaseContinuousOffPolicyEstimator\n+from .estimators_continuous import (\n+ BaseContinuousOffPolicyEstimator,\n+ KernelizedDoublyRobust as KDR,\n+)\nfrom ..types import BanditFeedback\nfrom ..utils import check_confidence_interval_arguments\n@@ -96,8 +99,11 @@ class ContinuousOffPolicyEvaluation:\n\"action\"\n]\nself.ope_estimators_ = dict()\n+ self.is_model_dependent = False\nfor estimator in self.ope_estimators:\nself.ope_estimators_[estimator.estimator_name] = estimator\n+ if isinstance(estimator, KDR):\n+ self.is_model_dependent = True\ndef _create_estimator_inputs(\nself,\n@@ -115,9 +121,7 @@ class ContinuousOffPolicyEvaluation:\n\"action_by_evaluation_policy must be 1-dimensional ndarray\"\n)\nif estimated_rewards_by_reg_model is None:\n- logger.warning(\n- \"`estimated_rewards_by_reg_model` is not given; model dependent estimators such as DM or DR cannot be used.\"\n- )\n+ pass\nelif isinstance(estimated_rewards_by_reg_model, dict):\nfor estimator_name, value in estimated_rewards_by_reg_model.items():\nif not isinstance(value, np.ndarray):\n@@ -186,6 +190,12 @@ class ContinuousOffPolicyEvaluation:\nDictionary containing estimated policy values by OPE estimators.\n\"\"\"\n+ if self.is_model_dependent:\n+ if estimated_rewards_by_reg_model is None:\n+ raise ValueError(\n+ \"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given\"\n+ )\n+\npolicy_value_dict = dict()\nestimator_inputs = self._create_estimator_inputs(\naction_by_evaluation_policy=action_by_evaluation_policy,\n@@ -237,6 +247,12 @@ class ContinuousOffPolicyEvaluation:\nusing nonparametric bootstrap procedure.\n\"\"\"\n+ if self.is_model_dependent:\n+ if estimated_rewards_by_reg_model is None:\n+ raise ValueError(\n+ \"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given\"\n+ )\n+\ncheck_confidence_interval_arguments(\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta.py",
"new_path": "tests/ope/test_meta.py",
"diff": "@@ -10,7 +10,7 @@ from pandas.testing import assert_frame_equal\nimport torch\nfrom obp.types import BanditFeedback\n-from obp.ope import OffPolicyEvaluation, BaseOffPolicyEstimator\n+from obp.ope import OffPolicyEvaluation, BaseOffPolicyEstimator, DirectMethod\nfrom obp.utils import check_confidence_interval_arguments\n@@ -310,6 +310,32 @@ def test_meta_post_init(synthetic_bandit_feedback: BanditFeedback) -> None:\n)\n+def test_meta_estimated_rewards_by_reg_model_inputs(\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the estimate_policy_values/estimate_intervals functions wrt estimated_rewards_by_reg_model\n+ \"\"\"\n+ ope_ = OffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback, ope_estimators=[DirectMethod()]\n+ )\n+\n+ action_dist = np.zeros(\n+ (synthetic_bandit_feedback[\"n_rounds\"], synthetic_bandit_feedback[\"n_actions\"])\n+ )\n+ with pytest.raises(ValueError):\n+ ope_.estimate_policy_values(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=None,\n+ )\n+\n+ with pytest.raises(ValueError):\n+ ope_.estimate_intervals(\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=None,\n+ )\n+\n+\n# action_dist, estimated_rewards_by_reg_model, description\ninvalid_input_of_create_estimator_inputs = [\n(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta_continuous.py",
"new_path": "tests/ope/test_meta_continuous.py",
"diff": "@@ -12,6 +12,7 @@ from obp.types import BanditFeedback\nfrom obp.ope import (\nContinuousOffPolicyEvaluation,\nBaseContinuousOffPolicyEstimator,\n+ KernelizedDoublyRobust,\n)\nfrom obp.utils import check_confidence_interval_arguments\n@@ -186,6 +187,32 @@ def test_meta_post_init(synthetic_continuous_bandit_feedback: BanditFeedback) ->\n)\n+def test_meta_estimated_rewards_by_reg_model_inputs(\n+ synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the estimate_policy_values/estimate_intervals functions wrt estimated_rewards_by_reg_model\n+ \"\"\"\n+ kdr = KernelizedDoublyRobust(kernel=\"cosine\", bandwidth=0.1)\n+ ope_ = ContinuousOffPolicyEvaluation(\n+ bandit_feedback=synthetic_bandit_feedback,\n+ ope_estimators=[kdr],\n+ )\n+\n+ action_by_evaluation_policy = np.zeros((synthetic_bandit_feedback[\"n_rounds\"],))\n+ with pytest.raises(ValueError):\n+ ope_.estimate_policy_values(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=None,\n+ )\n+\n+ with pytest.raises(ValueError):\n+ ope_.estimate_intervals(\n+ action_by_evaluation_policy=action_by_evaluation_policy,\n+ estimated_rewards_by_reg_model=None,\n+ )\n+\n+\n# action_by_evaluation_policy, estimated_rewards_by_reg_model, description\ninvalid_input_of_create_estimator_inputs = [\n(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix Error of meta |
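This commit replaces the earlier "cannot be used" warning with a hard failure whenever a model-dependent estimator is given no reward-model predictions. A stripped-down sketch of the same guard, using toy estimator classes instead of the real `DirectMethod`/`DoublyRobust` (only the control flow mirrors the diff):

```python
from dataclasses import dataclass, field
from typing import List, Optional

import numpy as np


@dataclass
class _ModelFreeEstimator:
    name: str = "ipw"


@dataclass
class _ModelDependentEstimator:
    name: str = "dm"


@dataclass
class MiniOPE:
    """Stripped-down stand-in for the guard added to OffPolicyEvaluation."""

    ope_estimators: List[object] = field(default_factory=list)

    def __post_init__(self) -> None:
        # remember at construction time whether any estimator needs a reward model
        self.is_model_dependent = any(
            isinstance(e, _ModelDependentEstimator) for e in self.ope_estimators
        )

    def estimate_policy_values(
        self, estimated_rewards_by_reg_model: Optional[np.ndarray] = None
    ) -> None:
        # fail fast instead of silently warning when the reward model is missing
        if self.is_model_dependent and estimated_rewards_by_reg_model is None:
            raise ValueError(
                "When model dependent estimators such as DM or DR are used, "
                "`estimated_rewards_by_reg_model` must be given"
            )
        print("ok: inputs are consistent")


MiniOPE(ope_estimators=[_ModelFreeEstimator()]).estimate_policy_values()
try:
    MiniOPE(ope_estimators=[_ModelDependentEstimator()]).estimate_policy_values()
except ValueError as e:
    print(f"raised as expected: {e}")
```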
641,014 | 30.08.2021 14:36:02 | 14,400 | 36beaad6a1c9e6c5c4aee8a572338138645f7fc9 | add check_array to synthetic_continuous | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_continuous.py",
"new_path": "obp/dataset/synthetic_continuous.py",
"diff": "@@ -11,6 +11,7 @@ from sklearn.utils import check_random_state, check_scalar\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n+from ..utils import check_array\n@dataclass\n@@ -238,8 +239,7 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\nThe policy value of the evaluation policy on the given test bandit feedback data.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, found it False\"\n@@ -286,10 +286,8 @@ def linear_reward_funcion_continuous(\nExpected reward given context (:math:`x`) and continuous action (:math:`a`).\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n- if not isinstance(action, np.ndarray) or action.ndim != 1:\n- raise ValueError(\"action must be 1D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ check_array(array=action, name=\"action\", expected_dim=1)\nif context.shape[0] != action.shape[0]:\nraise ValueError(\n\"Expected `context.shape[0] == action.shape[0]`, but found it False\"\n@@ -325,10 +323,8 @@ def quadratic_reward_funcion_continuous(\nExpected reward given context (:math:`x`) and continuous action (:math:`a`).\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n- if not isinstance(action, np.ndarray) or action.ndim != 1:\n- raise ValueError(\"action must be 1D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ check_array(array=action, name=\"action\", expected_dim=1)\nif context.shape[0] != action.shape[0]:\nraise ValueError(\n\"Expected `context.shape[0] == action.shape[0]`, but found it False\"\n@@ -366,8 +362,7 @@ def linear_behavior_policy_continuous(\nExpected continuous action values given context (:math:`x`).\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\nrandom_ = check_random_state(random_state)\ncoef_ = random_.normal(size=context.shape[1])\n@@ -392,8 +387,7 @@ def linear_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:\nContinuous action values given by a synthetic (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\nreturn context.mean(1)\n@@ -412,8 +406,7 @@ def threshold_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:\nContinuous action values given by a synthetic (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\nreturn 1.0 + np.sign(context.mean(1) - 1.5)\n@@ -432,7 +425,6 @@ def sign_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:\nContinuous action values given by a synthetic (deterministic) evaluation policy, i.e., :math:`\\\\pi_e(x_t)`.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ 
check_array(array=context, name=\"context\", expected_dim=2)\nreturn np.sin(context.mean(1))\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add check_array to synthetic_continuous |
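The hand-rolled `isinstance`/`ndim` checks are swapped here for a shared `check_array(array, name, expected_dim)` helper. The following is a rough guess at what such a helper does; the real `obp.utils.check_array` may differ in message wording and in what else it validates.

```python
import numpy as np


def check_array(array: np.ndarray, name: str, expected_dim: int = 1) -> None:
    """Validate that `array` is an ndarray with the expected dimensionality.

    Illustrative guess at ``obp.utils.check_array``; not the real implementation.
    """
    if not isinstance(array, np.ndarray):
        raise ValueError(f"{name} must be {expected_dim}D array, but got {type(array)}")
    if array.ndim != expected_dim:
        raise ValueError(
            f"{name} must be {expected_dim}D array, but got {array.ndim}D array"
        )


context = np.ones((5, 3))
check_array(array=context, name="context", expected_dim=2)  # passes silently
try:
    check_array(array=context.ravel(), name="context", expected_dim=2)
except ValueError as e:
    print(e)
```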
641,014 | 31.08.2021 18:18:12 | 14,400 | 274f651a48f3f2e088a09b5b9d44e350e1855c1e | add error to detect negative rewards for IPWLearner | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -18,8 +18,7 @@ import torch.optim as optim\nfrom tqdm import tqdm\nfrom .base import BaseOfflinePolicyLearner\n-\n-from ..utils import check_bandit_feedback_inputs\n+from ..utils import check_bandit_feedback_inputs, check_array\n@dataclass\n@@ -147,11 +146,20 @@ class IPWLearner(BaseOfflinePolicyLearner):\npscore=pscore,\nposition=position,\n)\n+ if (reward < 0).any():\n+ raise ValueError(\n+ \"A negative value is found in `reward`.\"\n+ \"`obp.policy.IPWLearner` cannot handle negative rewards,\"\n+ \"and please use `obp.policy.NNPolicyLearner` instead.\"\n+ )\nif pscore is None:\nn_actions = np.int(action.max() + 1)\npscore = np.ones_like(action) / n_actions\n- if position is None or self.len_list == 1:\n+ if self.len_list == 1:\nposition = np.zeros_like(action, dtype=int)\n+ else:\n+ if position is None:\n+ raise ValueError(\"When `self.len_list=1`, `position` must be given.\")\nfor position_ in np.arange(self.len_list):\nX, sample_weight, y = self._create_train_data_for_opl(\n@@ -184,8 +192,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nIf you want a non-repetitive action set, please use the `sample_action` method.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\nn_rounds = context.shape[0]\naction_dist = np.zeros((n_rounds, self.n_actions, self.len_list))\n@@ -214,9 +221,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nScores for all possible pairs of action and position predicted by a classifier.\n\"\"\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2D array\"\n+ check_array(array=context, name=\"context\", expected_dim=2)\nn_rounds = context.shape[0]\nscore_predicted = np.zeros((n_rounds, self.n_actions, self.len_list))\n@@ -271,8 +276,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nAction sampled by a trained classifier.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n+ check_array(array=context, name=\"context\", expected_dim=2)\ncheck_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\nn_rounds = context.shape[0]\n@@ -329,10 +333,8 @@ class IPWLearner(BaseOfflinePolicyLearner):\n\"\"\"\nassert (\nself.len_list == 1\n- ), \"predict_proba method can be used only when len_list = 1\"\n- assert (\n- isinstance(context, np.ndarray) and context.ndim == 2\n- ), \"context must be 2D array\"\n+ ), \"predict_proba method cannot be used when `len_list != 1`\"\n+ check_array(array=context, name=\"context\", expected_dim=2)\ncheck_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\nscore_predicted = self.predict_score(context=context)\n@@ -761,19 +763,16 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\npscore=pscore,\nposition=position,\n)\n-\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, but found it False\"\n)\n-\nif pscore is None:\npscore = np.ones_like(action) / self.n_actions\nif estimated_rewards_by_reg_model is None:\nestimated_rewards_by_reg_model = np.zeros(\n(context.shape[0], self.n_actions, self.len_list)\n)\n-\nif self.len_list == 1:\nposition = np.zeros_like(action, dtype=int)\nelse:\n@@ -900,9 +899,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nIf you want a non-repetitive action set, please use the `sample_action` method.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise 
ValueError(\"context must be 2D array\")\n-\n+ check_array(array=context, name=\"context\", expected_dim=2)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, but found it False\"\n@@ -939,9 +936,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nAction sampled by a trained classifier.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n-\n+ check_array(array=context, name=\"context\", expected_dim=2)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, but found it False\"\n@@ -988,9 +983,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nAction choice probabilities obtained by a trained classifier.\n\"\"\"\n- if not isinstance(context, np.ndarray) or context.ndim != 2:\n- raise ValueError(\"context must be 2D array\")\n-\n+ check_array(array=context, name=\"context\", expected_dim=2)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, but found it False\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add error to detect negative rewards for IPWLearner |
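IPWLearner reduces off-policy learning to weighted classification, and, as far as I understand the reduction, the per-sample classification weights are `reward / pscore`; that is why the new guard rejects negative rewards outright and points users to `NNPolicyLearner`. A toy illustration of the condition being checked (all arrays below are synthetic):

```python
import numpy as np

rng = np.random.default_rng(0)
n_rounds, n_actions = 8, 3
reward = rng.normal(loc=0.0, scale=1.0, size=n_rounds)  # can be negative
pscore = np.full(n_rounds, 1.0 / n_actions)

# negative rewards would become negative sample weights in the
# cost-sensitive-classification reduction, hence the new guard in `fit`
if (reward < 0).any():
    print(
        "A negative value is found in `reward`; "
        "IPWLearner cannot handle it, use NNPolicyLearner instead."
    )
else:
    sample_weight = reward / pscore
    print(sample_weight)
```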
641,014 | 31.08.2021 18:18:28 | 14,400 | e3f494d818a5a2e32f582ee0cfd1da3f7d895b9a | adjust tests to changes in obp | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_regression_models.py",
"new_path": "tests/ope/test_regression_models.py",
"diff": "@@ -334,7 +334,7 @@ invalid_input_of_fitting_regression_models = [\nNone,\n3,\n1,\n- \"context, action, reward, and position must have the same number of samples.\",\n+ \"Expected `context.shape[0]\",\n),\n(\nnp.random.uniform(size=(n_rounds, 7)),\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline.py",
"new_path": "tests/policy/test_offline.py",
"diff": "import pytest\nimport numpy as np\n-from sklearn.linear_model import LogisticRegression\n+from sklearn.linear_model import LogisticRegression, LinearRegression\nimport torch\nfrom obp.policy.offline import IPWLearner\n@@ -9,33 +9,102 @@ from obp.policy.policy_type import PolicyType\nfrom obp.ope.estimators import InverseProbabilityWeighting\n-def test_base_opl_init():\n- # n_actions\n- with pytest.raises(ValueError):\n- IPWLearner(n_actions=1)\n+base_classifier = LogisticRegression()\n+base_regressor = LinearRegression()\n- with pytest.raises(ValueError):\n- IPWLearner(n_actions=\"3\")\n+# n_actions, len_list, dim_context, base_classifier, description\n+invalid_input_of_nn_policy_learner_init = [\n+ (\n+ 0, #\n+ 1,\n+ 2,\n+ base_classifier,\n+ \"n_actions must be an integer larger than 1\",\n+ ),\n+ (\n+ 10,\n+ -1, #\n+ 2,\n+ base_classifier,\n+ \"len_list must be a positive integer\",\n+ ),\n+ (\n+ 10,\n+ 20, #\n+ 2,\n+ base_classifier,\n+ \"Expected `n_actions\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ -1, #\n+ base_classifier,\n+ \"dim_context must be a positive integer\",\n+ ),\n+ (10, 1, 2, base_regressor, \"base_classifier must be a classifier\"), #\n+]\n- # len_list\n- with pytest.raises(ValueError):\n- IPWLearner(n_actions=2, len_list=0)\n+valid_input_of_nn_policy_learner_init = [\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ None,\n+ \"valid input\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ base_classifier,\n+ \"valid input\",\n+ ),\n+]\n- with pytest.raises(ValueError):\n- IPWLearner(n_actions=2, len_list=\"3\")\n- # policy_type\n- assert IPWLearner(n_actions=2).policy_type == PolicyType.OFFLINE\[email protected](\n+ \"n_actions, len_list, dim_context, base_classifier, description\",\n+ invalid_input_of_nn_policy_learner_init,\n+)\n+def test_ipw_learner_init_using_invalid_inputs(\n+ n_actions,\n+ len_list,\n+ dim_context,\n+ base_classifier,\n+ description,\n+):\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = IPWLearner(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ base_classifier=base_classifier,\n+ )\n- # invalid relationship between n_actions and len_list\n- with pytest.raises(ValueError):\n- IPWLearner(n_actions=5, len_list=10)\n- with pytest.raises(ValueError):\n- IPWLearner(n_actions=2, len_list=3)\[email protected](\n+ \"n_actions, len_list, dim_context, base_classifier, description\",\n+ valid_input_of_nn_policy_learner_init,\n+)\n+def test_ipw_learner_init_using_valid_inputs(\n+ n_actions,\n+ len_list,\n+ dim_context,\n+ base_classifier,\n+ description,\n+):\n+ ipw_learner = IPWLearner(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ base_classifier=base_classifier,\n+ )\n+ # policy_type\n+ assert ipw_learner.policy_type == PolicyType.OFFLINE\n-def test_ipw_learner_init():\n+def test_ipw_learner_init_base_classifier_list():\n# base classifier\nlen_list = 2\nlearner1 = IPWLearner(n_actions=2, len_list=len_list)\n@@ -43,11 +112,6 @@ def test_ipw_learner_init():\nfor i in range(len_list):\nassert isinstance(learner1.base_classifier_list[i], LogisticRegression)\n- with pytest.raises(ValueError):\n- from sklearn.linear_model import LinearRegression\n-\n- IPWLearner(n_actions=2, base_classifier=LinearRegression())\n-\nfrom sklearn.naive_bayes import GaussianNB\nlearner2 = IPWLearner(n_actions=2, len_list=len_list, base_classifier=GaussianNB())\n@@ -73,33 +137,59 @@ def test_ipw_learner_create_train_data_for_opl():\ndef test_ipw_learner_fit():\n- context = np.array([1.0, 1.0, 1.0, 1.0]).reshape(2, -1)\n- action = np.array([0, 
1])\n- reward = np.array([1.0, 0.0])\n- position = np.array([0, 0])\n- learner = IPWLearner(n_actions=2, len_list=1)\n- learner.fit(context=context, action=action, reward=reward, position=position)\n+ n_rounds = 1000\n+ dim_context = 5\n+ n_actions = 3\n+ len_list = 2\n+ context = np.ones((n_rounds, dim_context))\n+ action = np.random.choice(np.arange(len_list, dtype=int), size=n_rounds)\n+ reward = np.random.choice(np.arange(2), size=n_rounds)\n+ position = np.random.choice(np.arange(len_list, dtype=int), size=n_rounds)\n# inconsistency with the shape\n- with pytest.raises(ValueError):\n- learner = IPWLearner(n_actions=2, len_list=2)\n- variant_context = np.array([1.0, 1.0, 1.0, 1.0])\n+ desc = \"Expected `context.shape[0]\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\n+ learner = IPWLearner(n_actions=n_actions, len_list=len_list)\n+ variant_context = np.random.normal(size=(n_rounds + 1, n_actions))\nlearner.fit(\n- context=variant_context, action=action, reward=reward, position=position\n+ context=variant_context,\n+ action=action,\n+ reward=reward,\n+ position=position,\n)\n# len_list > 2, but position is not set\n- with pytest.raises(ValueError):\n- learner = IPWLearner(n_actions=2, len_list=2)\n+ desc = \"When `self.len_list=1\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\n+ learner = IPWLearner(n_actions=n_actions, len_list=len_list)\nlearner.fit(context=context, action=action, reward=reward)\n+ # position must be non-negative\n+ desc = \"position elements must be non-negative integers\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\n+ negative_position = position - 1\n+ learner = IPWLearner(n_actions=n_actions, len_list=len_list)\n+ learner.fit(\n+ context=context, action=action, reward=reward, position=negative_position\n+ )\n+\n+ # IPWLearner cannot handle negative rewards\n+ desc = \"A negative value is found in\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\n+ negative_reward = reward - 1.0\n+ learner = IPWLearner(n_actions=n_actions, len_list=len_list)\n+ learner.fit(\n+ context=context, action=action, reward=negative_reward, position=position\n+ )\n+\ndef test_ipw_learner_predict():\nn_actions = 2\nlen_list = 1\n# shape error\n- with pytest.raises(ValueError):\n+ desc = \"context must be 2D array\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\ncontext = np.array([1.0, 1.0])\nlearner = IPWLearner(n_actions=n_actions, len_list=len_list)\nlearner.predict(context=context)\n@@ -133,11 +223,12 @@ def test_ipw_learner_sample_action():\nlearner = IPWLearner(n_actions=n_actions, len_list=len_list)\nlearner.fit(context=context, action=action, reward=reward, position=position)\n- with pytest.raises(ValueError):\n+ desc = \"context must be 2D array\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\ninvalid_type_context = [1.0, 2.0]\nlearner.sample_action(context=invalid_type_context)\n- with pytest.raises(ValueError):\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\ninvalid_ndim_context = np.array([1.0, 2.0, 3.0, 4.0])\nlearner.sample_action(context=invalid_ndim_context)\n@@ -1009,7 +1100,8 @@ def test_nn_policy_learner_fit():\nipw = InverseProbabilityWeighting()\n# inconsistency with the shape\n- with pytest.raises(ValueError):\n+ desc = \"Expected `context.shape[0]\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=2,\ndim_context=2,\n@@ -1021,7 +1113,8 @@ def test_nn_policy_learner_fit():\n)\n# inconsistency between dim_context and context\n- with pytest.raises(ValueError):\n+ 
desc = \"Expected `context.shape[1]\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=2,\ndim_context=3,\n@@ -1041,7 +1134,8 @@ def test_nn_policy_learner_predict():\nipw = InverseProbabilityWeighting()\n# shape error\n- with pytest.raises(ValueError):\n+ desc = \"context must be 2D array\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=n_actions,\nlen_list=len_list,\n@@ -1053,7 +1147,8 @@ def test_nn_policy_learner_predict():\nlearner.predict(context=invalid_context)\n# inconsistency between dim_context and context\n- with pytest.raises(ValueError):\n+ desc = \"Expected `context.shape[1]\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=n_actions,\nlen_list=len_list,\n@@ -1093,7 +1188,8 @@ def test_nn_policy_learner_sample_action():\nipw = InverseProbabilityWeighting()\n# shape error\n- with pytest.raises(ValueError):\n+ desc = \"context must be 2D array\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=n_actions,\nlen_list=len_list,\n@@ -1105,7 +1201,8 @@ def test_nn_policy_learner_sample_action():\nlearner.sample_action(context=invalid_context)\n# inconsistency between dim_context and context\n- with pytest.raises(ValueError):\n+ desc = \"Expected `context.shape[1]\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=n_actions,\nlen_list=len_list,\n@@ -1143,7 +1240,8 @@ def test_nn_policy_learner_predict_proba():\nipw = InverseProbabilityWeighting()\n# shape error\n- with pytest.raises(ValueError):\n+ desc = \"context must be 2D array\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=n_actions,\nlen_list=len_list,\n@@ -1155,7 +1253,8 @@ def test_nn_policy_learner_predict_proba():\nlearner.predict_proba(context=invalid_context)\n# inconsistency between dim_context and context\n- with pytest.raises(ValueError):\n+ desc = \"Expected `context.shape[1]\"\n+ with pytest.raises(ValueError, match=f\"{desc}*\"):\nlearner = NNPolicyLearner(\nn_actions=n_actions,\nlen_list=len_list,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | adjust tests to changes in obp |
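Most of the rewritten tests follow one pattern: parametrized invalid inputs plus `pytest.raises(..., match=f"{description}*")`, where the description is the start of the expected message and `match` is applied with `re.search`. A minimal sketch of that pattern on a made-up function (`set_learning_rate` is hypothetical, not part of obp):

```python
import pytest


def set_learning_rate(lr: float) -> None:
    # toy function used only to demonstrate the test pattern
    if lr <= 0:
        raise ValueError(f"`lr`= {lr}, must be > 0.")


@pytest.mark.parametrize(
    "lr, description",
    [(0.0, "`lr`= 0.0"), (-1.0, "`lr`= -1.0")],
)
def test_set_learning_rate_invalid(lr, description):
    # `match` is searched as a regex, so the description acts as a
    # prefix of the expected error message
    with pytest.raises(ValueError, match=f"{description}*"):
        set_learning_rate(lr)
```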
641,014 | 31.08.2021 18:28:07 | 14,400 | 35a053fed6a0fe1f8155e8a5c6bf0e0604995af0 | apply check_array functions | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_continuous.py",
"new_path": "obp/dataset/synthetic_continuous.py",
"diff": "@@ -240,12 +240,11 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\n\"\"\"\ncheck_array(array=context, name=\"context\", expected_dim=2)\n+ check_array(array=action, name=\"action\", expected_dim=1)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, found it False\"\n)\n- if not isinstance(action, np.ndarray) or action.ndim != 1:\n- raise ValueError(\"action must be 1D array\")\nif context.shape[0] != action.shape[0]:\nraise ValueError(\n\"Expected `context.shape[0] == action.shape[0]`, but found it False\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | apply check_array functions |
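Note that `check_array` only covers the per-array dimensionality check; the cross-array consistency checks (`context.shape[1] == dim_context`, `context.shape[0] == action.shape[0]`) stay inline, as this hunk shows. A small stand-alone sketch of that split (the function name below is made up for illustration):

```python
import numpy as np


def validate_context_action(
    context: np.ndarray, action: np.ndarray, dim_context: int
) -> None:
    """Illustrative version of the inline checks kept around check_array."""
    if not isinstance(context, np.ndarray) or context.ndim != 2:
        raise ValueError("context must be 2D array")
    if not isinstance(action, np.ndarray) or action.ndim != 1:
        raise ValueError("action must be 1D array")
    # cross-array checks that remain outside check_array
    if context.shape[1] != dim_context:
        raise ValueError("Expected `context.shape[1] == self.dim_context`, found it False")
    if context.shape[0] != action.shape[0]:
        raise ValueError("Expected `context.shape[0] == action.shape[0]`, but found it False")


validate_context_action(np.ones((10, 4)), np.zeros(10), dim_context=4)  # ok
try:
    validate_context_action(np.ones((10, 4)), np.zeros(9), dim_context=4)
except ValueError as e:
    print(e)
```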
641,014 | 03.09.2021 18:16:10 | 14,400 | 67d283af7f16281c7445cce5bd43972df8321e14 | make the hyperparameter tuning method more flexible | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/helper.py",
"new_path": "obp/ope/helper.py",
"diff": "@@ -7,6 +7,53 @@ import numpy as np\nfrom sklearn.utils import check_scalar\n+def estimate_bias_in_ope(\n+ reward: np.ndarray,\n+ iw: np.ndarray,\n+ iw_hat: np.ndarray,\n+ q_hat: Optional[np.ndarray] = None,\n+) -> float:\n+ \"\"\"Helper to estimate a bias in OPE.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ iw: array-like, shape (n_rounds,)\n+ Importance weight in each round of the logged bandit feedback, i.e., :math:`w(x,a)=\\\\pi_e(a|x)/ \\\\pi_b(a|x)`.\n+\n+ iw_hat: array-like, shape (n_rounds,)\n+ Importance weight (IW) modified by a hyparpareter. How IW is modified depends on the estimator as follows.\n+ - clipping: :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\\\lambda, w(x,a) \\\\}`\n+ - switching: :math:`\\\\hat{w}(x,a) := w(x,a) \\\\cdot \\\\mathbb{I} \\\\{ w(x,a) < \\\\tau \\\\}`\n+ - shrinkage: :math:`\\\\hat{w}(x,a) := (\\\\lambda w(x,a)) / (\\\\lambda + w^2(x,a))`\n+ where :math:`\\\\tau` and :math:`\\\\lambda` are hyperparameters.\n+\n+ q_hat: array-like, shape (n_rounds,), default=None\n+ Estimated expected reward given context :math:`x_t` and action :math:`a_t`.\n+\n+ Returns\n+ ----------\n+ estimated_bias: float\n+ Estimated the bias in OPE.\n+ This is based on the direct bias estimation stated on page 17 of Su et al.(2020).\n+\n+ References\n+ ----------\n+ Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.\n+ \"Doubly Robust Off-Policy Evaluation with Shrinkage.\", 2020.\n+\n+ \"\"\"\n+ n_rounds = reward.shape[0]\n+ if q_hat is None:\n+ q_hat = np.zeros(n_rounds)\n+ estimated_bias_arr = (iw - iw_hat) * (reward - q_hat)\n+ estimated_bias = np.abs(estimated_bias_arr.mean())\n+\n+ return estimated_bias\n+\n+\ndef estimate_high_probability_upper_bound_bias(\nreward: np.ndarray,\niw: np.ndarray,\n@@ -51,14 +98,16 @@ def estimate_high_probability_upper_bound_bias(\n\"\"\"\ncheck_scalar(\n- delta, name=\"delta\", target_type=(int, float), max_val=1.0, min_val=0.0\n+ delta, name=\"delta\", target_type=(int, float), min_val=0.0, max_val=1.0\n)\n+ bias_upper_bound = estimate_bias_in_ope(\n+ reward=reward,\n+ iw=iw,\n+ iw_hat=iw_hat,\n+ q_hat=q_hat,\n+ )\nn_rounds = reward.shape[0]\n- if q_hat is None:\n- q_hat = np.zeros(n_rounds)\n- bias_upper_bound_arr = (iw - iw_hat) * (reward - q_hat)\n- bias_upper_bound = np.abs(bias_upper_bound_arr.mean())\nbias_upper_bound += np.sqrt((2 * (iw ** 2).mean() * np.log(2 / delta)) / n_rounds)\nbias_upper_bound += (2 * iw.max() * np.log(2 / delta)) / (3 * n_rounds)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | make the hyperparameter tuning method more flexible |
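The new `estimate_bias_in_ope` helper and the existing upper-bound helper reduce to two formulas: the direct bias estimate `|E_n[(w - w_hat) * (r - q_hat)]|`, and a Bernstein-style deviation term added on top for the high-probability bound. A numpy sketch with synthetic weights and rewards, using clipping as one example of how `w_hat` can be derived from `w`:

```python
import numpy as np

rng = np.random.default_rng(42)
n = 5000
delta = 0.05

reward = rng.binomial(1, 0.3, size=n).astype(float)
iw = rng.lognormal(mean=0.0, sigma=1.0, size=n)   # raw importance weights w(x,a)
lambda_ = 5.0
iw_hat = np.minimum(iw, lambda_)                  # clipped weights, one possible modification
q_hat = np.full(n, reward.mean())                 # crude reward-model predictions

# direct bias estimate from the diff: |E_n[(w - w_hat)(r - q_hat)]|
estimated_bias = np.abs(((iw - iw_hat) * (reward - q_hat)).mean())

# high-probability upper bound adds Bernstein-style deviation terms
bias_upper_bound = estimated_bias
bias_upper_bound += np.sqrt((2 * (iw ** 2).mean() * np.log(2 / delta)) / n)
bias_upper_bound += (2 * iw.max() * np.log(2 / delta)) / (3 * n)

print(f"estimated bias        : {estimated_bias:.4f}")
print(f"bias upper bound (95%): {bias_upper_bound:.4f}")
```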
641,014 | 03.09.2021 18:45:16 | 14,400 | 2f0e1ed01140fc9724d8efbc62eb7f9501ad5ba2 | change tau of Switch to lambda_ to make the name consistent across estimators | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -1521,20 +1521,20 @@ class SwitchDoublyRobust(DoublyRobust):\n.. math::\n- \\\\hat{V}_{\\\\mathrm{SwitchDR}} (\\\\pi_e; \\\\mathcal{D}, \\\\hat{q}, \\\\tau)\n- := \\\\mathbb{E}_{\\\\mathcal{D}} [\\\\hat{q}(x_t,\\\\pi_e) + w(x_t,a_t) (r_t - \\\\hat{q}(x_t,a_t)) \\\\mathbb{I} \\\\{ w(x_t,a_t) \\\\le \\\\tau \\\\}],\n+ \\\\hat{V}_{\\\\mathrm{SwitchDR}} (\\\\pi_e; \\\\mathcal{D}, \\\\hat{q}, \\\\lambda)\n+ := \\\\mathbb{E}_{\\\\mathcal{D}} [\\\\hat{q}(x_t,\\\\pi_e) + w(x_t,a_t) (r_t - \\\\hat{q}(x_t,a_t)) \\\\mathbb{I} \\\\{ w(x_t,a_t) \\\\le \\\\lambda \\\\}],\nwhere :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by\na behavior policy :math:`\\\\pi_b`. :math:`w(x,a):=\\\\pi_e (a|x)/\\\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.\n:math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n- :math:`\\\\tau (\\\\ge 0)` is a switching hyperparameter, which decides the threshold for the importance weight.\n+ :math:`\\\\lambda (\\\\ge 0)` is a switching hyperparameter, which decides the threshold for the importance weight.\n:math:`\\\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.\n:math:`\\\\hat{q} (x_t,\\\\pi):= \\\\mathbb{E}_{a \\\\sim \\\\pi(a|x)}[\\\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\\\pi`.\nTo estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`.\nParameters\n----------\n- tau: float, default=np.inf\n+ lambda_: float, default=np.inf\nSwitching hyperparameter. When importance weight is larger than this parameter, DM is applied, otherwise DR is used.\nThis hyperparameter should be larger than or equal to 0., otherwise it is meaningless.\n@@ -1554,19 +1554,19 @@ class SwitchDoublyRobust(DoublyRobust):\n\"\"\"\n- tau: float = np.inf\n+ lambda_: float = np.inf\nestimator_name: str = \"switch-dr\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\ncheck_scalar(\n- self.tau,\n- name=\"tau\",\n+ self.lambda_,\n+ name=\"lambda_\",\ntarget_type=(int, float),\nmin_val=0.0,\n)\n- if self.tau != self.tau:\n- raise ValueError(\"tau must not be nan\")\n+ if self.lambda_ != self.lambda_:\n+ raise ValueError(\"lambda_ must not be nan\")\ndef _estimate_round_rewards(\nself,\n@@ -1610,7 +1610,7 @@ class SwitchDoublyRobust(DoublyRobust):\n\"\"\"\nn_rounds = action.shape[0]\niw = action_dist[np.arange(n_rounds), action, position] / pscore\n- switch_indicator = np.array(iw <= self.tau, dtype=int)\n+ switch_indicator = np.array(iw <= self.lambda_, dtype=int)\nq_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n@@ -1683,7 +1683,7 @@ class SwitchDoublyRobust(DoublyRobust):\nReturns\n----------\nestimated_mse_score: float\n- Estimated MSE score of a given switching hyperparameter `tau`.\n+ Estimated MSE score of a given switching hyperparameter `lambda_`.\nMSE score is the sum of (high probability) upper bound of bias and the sample variance.\nThis is estimated using the automatic hyperparameter tuning procedure\nbased on Section 5 of Su et al.(2020).\n@@ -1709,7 +1709,7 @@ class SwitchDoublyRobust(DoublyRobust):\nbias_term = estimate_high_probability_upper_bound_bias(\nreward=reward,\niw=iw,\n- iw_hat=iw * np.array(iw <= self.tau, dtype=int),\n+ iw_hat=iw * np.array(iw <= self.lambda_, dtype=int),\nq_hat=estimated_rewards_by_reg_model[\nnp.arange(n_rounds), action, 
position\n],\n@@ -1719,7 +1719,7 @@ class SwitchDoublyRobust(DoublyRobust):\nbias_term = estimate_bias_in_ope(\nreward=reward,\niw=iw,\n- iw_hat=iw * np.array(iw <= self.tau, dtype=int),\n+ iw_hat=iw * np.array(iw <= self.lambda_, dtype=int),\nq_hat=estimated_rewards_by_reg_model[\nnp.arange(n_rounds), action, position\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -26,7 +26,7 @@ class BaseOffPolicyEstimatorTuning:\nAn OPE estimator with a hyperparameter\n(such as IPW/DR with clipping, Switch-DR, and DR with Shrinkage).\n- candidate_hyperparameter_list: List[float]\n+ lambdas: List[float]\nA list of candidate hyperparameter values.\nuse_bias_upper_bound: bool, default=True\n@@ -46,7 +46,7 @@ class BaseOffPolicyEstimatorTuning:\n\"\"\"\nbase_ope_estimator: BaseOffPolicyEstimator = field(init=False)\n- candidate_hyperparameter_list: List[float] = field(init=False)\n+ lambdas: List[float] = None\nuse_bias_upper_bound: bool = True\ndelta: float = 0.05\n@@ -54,28 +54,29 @@ class BaseOffPolicyEstimatorTuning:\ndataclass(cls)\nreturn super().__new__(cls)\n- def _check_candidate_hyperparameter_list(self, hyperparam_name: str) -> None:\n- \"\"\"Check type and value of candidate_hyperparameter_list.\"\"\"\n- if isinstance(self.candidate_hyperparameter_list, list):\n- if len(self.candidate_hyperparameter_list) == 0:\n- raise ValueError(f\"{hyperparam_name} must not be empty\")\n- for hyperparam_ in self.candidate_hyperparameter_list:\n+ def _check_lambdas(self) -> None:\n+ \"\"\"Check type and value of lambdas.\"\"\"\n+ if isinstance(self.lambdas, list):\n+ if len(self.lambdas) == 0:\n+ raise ValueError(\"lambdas must not be empty\")\n+ for hyperparam_ in self.lambdas:\ncheck_scalar(\nhyperparam_,\n- name=f\"an element of {hyperparam_name}\",\n+ name=\"an element of lambdas\",\ntarget_type=(int, float),\nmin_val=0.0,\n)\nif hyperparam_ != hyperparam_:\n- raise ValueError(f\"an element of {hyperparam_name} must not be nan\")\n+ raise ValueError(\"an element of lambdas must not be nan\")\nelse:\n- raise TypeError(f\"{hyperparam_name} must be a list\")\n+ raise TypeError(\"lambdas must be a list\")\ndef _check_init_inputs(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nif not isinstance(self.use_bias_upper_bound, bool):\nraise TypeError(\n- f\"`use_bias_upper_bound` must be a bool, but {type(self.use_bias_upper_bound)} is given\"\n+ \"`use_bias_upper_bound` must be a bool\"\n+ \", but {type(self.use_bias_upper_bound)} is given\"\n)\ncheck_scalar(self.delta, \"delta\", target_type=(float), min_val=0.0, max_val=1.0)\n@@ -90,7 +91,7 @@ class BaseOffPolicyEstimatorTuning:\n) -> None:\n\"\"\"Find the best hyperparameter value from the given candidate set.\"\"\"\nself.estimated_mse_score_dict = dict()\n- for hyperparam_ in self.candidate_hyperparameter_list:\n+ for hyperparam_ in self.lambdas:\nestimated_mse_score = self.base_ope_estimator(\nhyperparam_\n)._estimate_mse_score(\n@@ -271,14 +272,12 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\n\"\"\"\n- lambdas: List[float] = None\nestimator_name: str = \"ipw\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nself.base_ope_estimator = InverseProbabilityWeighting\n- self.candidate_hyperparameter_list = self.lambdas\n- super()._check_candidate_hyperparameter_list(hyperparam_name=\"lambdas\")\n+ super()._check_lambdas()\nsuper()._check_init_inputs()\ndef estimate_policy_value(\n@@ -438,8 +437,7 @@ class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nself.base_ope_estimator = DoublyRobust\n- self.candidate_hyperparameter_list = self.lambdas\n- super()._check_candidate_hyperparameter_list(hyperparam_name=\"lambdas\")\n+ super()._check_lambdas()\nsuper()._check_init_inputs()\ndef estimate_policy_value(\n@@ -595,7 +593,7 @@ class 
SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nParameters\n----------\n- taus: List[float]\n+ lambdas: List[float]\nA list of candidate switching hyperparameters.\nThe automatic hyperparameter tuning proposed by Su et al.(2020)\nwill choose the best hyperparameter value from the data.\n@@ -613,14 +611,12 @@ class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\n\"\"\"\n- taus: List[float] = None\nestimator_name: str = \"switch-dr\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nself.base_ope_estimator = SwitchDoublyRobust\n- self.candidate_hyperparameter_list = self.taus\n- super()._check_candidate_hyperparameter_list(hyperparam_name=\"taus\")\n+ super()._check_lambdas()\nsuper()._check_init_inputs()\ndef estimate_policy_value(\n@@ -794,14 +790,12 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\n\"\"\"\n- lambdas: List[float] = None\nestimator_name: str = \"dr-os\"\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\nself.base_ope_estimator = DoublyRobustWithShrinkage\n- self.candidate_hyperparameter_list = self.lambdas\n- super()._check_candidate_hyperparameter_list(hyperparam_name=\"lambdas\")\n+ super()._check_lambdas()\nsuper()._check_init_inputs()\ndef estimate_policy_value(\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/helper.py",
"new_path": "obp/ope/helper.py",
"diff": "@@ -26,9 +26,9 @@ def estimate_bias_in_ope(\niw_hat: array-like, shape (n_rounds,)\nImportance weight (IW) modified by a hyparpareter. How IW is modified depends on the estimator as follows.\n- clipping: :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\\\lambda, w(x,a) \\\\}`\n- - switching: :math:`\\\\hat{w}(x,a) := w(x,a) \\\\cdot \\\\mathbb{I} \\\\{ w(x,a) < \\\\tau \\\\}`\n+ - switching: :math:`\\\\hat{w}(x,a) := w(x,a) \\\\cdot \\\\mathbb{I} \\\\{ w(x,a) < \\\\lambda \\\\}`\n- shrinkage: :math:`\\\\hat{w}(x,a) := (\\\\lambda w(x,a)) / (\\\\lambda + w^2(x,a))`\n- where :math:`\\\\tau` and :math:`\\\\lambda` are hyperparameters.\n+ where :math:`\\\\lambda` is a hyperparameter value.\nq_hat: array-like, shape (n_rounds,), default=None\nEstimated expected reward given context :math:`x_t` and action :math:`a_t`.\n@@ -74,9 +74,9 @@ def estimate_high_probability_upper_bound_bias(\niw_hat: array-like, shape (n_rounds,)\nImportance weight (IW) modified by a hyparpareter. How IW is modified depends on the estimator as follows.\n- clipping: :math:`\\\\hat{w}(x,a) := \\\\min \\\\{ \\\\lambda, w(x,a) \\\\}`\n- - switching: :math:`\\\\hat{w}(x,a) := w(x,a) \\\\cdot \\\\mathbb{I} \\\\{ w(x,a) < \\\\tau \\\\}`\n+ - switching: :math:`\\\\hat{w}(x,a) := w(x,a) \\\\cdot \\\\mathbb{I} \\\\{ w(x,a) < \\\\lambda \\\\}`\n- shrinkage: :math:`\\\\hat{w}(x,a) := (\\\\lambda w(x,a)) / (\\\\lambda + w^2(x,a))`\n- where :math:`\\\\tau` and :math:`\\\\lambda` are hyperparameters.\n+ where :math:`\\\\lambda` and :math:`\\\\lambda` are hyperparameters.\nq_hat: array-like, shape (n_rounds,), default=None\nEstimated expected reward given context :math:`x_t` and action :math:`a_t`.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -46,6 +46,9 @@ def test_dr_init_using_invalid_inputs(\nwith pytest.raises(err, match=f\"{description}*\"):\n_ = DoublyRobust(lambda_=lambda_)\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = SwitchDoublyRobust(lambda_=lambda_)\n+\nwith pytest.raises(err, match=f\"{description}*\"):\n_ = DoublyRobustWithShrinkage(lambda_=lambda_)\n@@ -157,178 +160,54 @@ def test_dr_tuning_init_using_invalid_inputs(\n)\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = DoublyRobustWithShrinkageTuning(\n+ _ = SwitchDoublyRobustTuning(\nuse_bias_upper_bound=use_bias_upper_bound,\ndelta=delta,\nlambdas=lambdas,\n)\n-\n-# tau, err, description\n-invalid_input_of_switch_dr_init = [\n- (\n- \"\",\n- TypeError,\n- r\"`tau` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n- ),\n- (\n- None,\n- TypeError,\n- r\"`tau` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n- ),\n- (-1.0, ValueError, \"`tau`= -1.0, must be >= 0.0.\"),\n- (np.nan, ValueError, \"tau must not be nan\"),\n-]\n-\n-\[email protected](\n- \"tau, err, description\",\n- invalid_input_of_switch_dr_init,\n-)\n-def test_switch_dr_init_using_invalid_inputs(\n- tau,\n- err,\n- description,\n-):\n- with pytest.raises(err, match=f\"{description}*\"):\n- _ = SwitchDoublyRobust(tau=tau)\n-\n-\n-# taus, err, description\n-invalid_input_of_switch_dr_tuning_init = [\n- (\n- \"\", #\n- True,\n- 0.05,\n- TypeError,\n- \"taus must be a list\",\n- ),\n- (\n- None, #\n- True,\n- 0.05,\n- TypeError,\n- \"taus must be a list\",\n- ),\n- (\n- [\"\"], #\n- True,\n- 0.05,\n- TypeError,\n- r\"`an element of taus` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n- ),\n- (\n- [None], #\n- True,\n- 0.05,\n- TypeError,\n- r\"`an element of taus` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n- ),\n- (\n- [], #\n- True,\n- 0.05,\n- ValueError,\n- \"taus must not be empty\",\n- ),\n- ([-1.0], True, 0.05, ValueError, \"`an element of taus`= -1.0, must be >= 0.0.\"),\n- ([np.nan], True, 0.05, ValueError, \"an element of taus must not be nan\"),\n- (\n- [1],\n- \"\", #\n- 0.05,\n- TypeError,\n- \"`use_bias_upper_bound` must be a bool\",\n- ),\n- (\n- [1],\n- None, #\n- 0.05,\n- TypeError,\n- \"`use_bias_upper_bound` must be a bool\",\n- ),\n- (\n- [1],\n- True,\n- \"\", #\n- TypeError,\n- \"`delta` must be an instance of <class 'float'>\",\n- ),\n- (\n- [1],\n- True,\n- None, #\n- TypeError,\n- \"`delta` must be an instance of <class 'float'>\",\n- ),\n- (\n- [1],\n- True,\n- -1.0, #\n- ValueError,\n- \"`delta`= -1.0, must be >= 0.0.\",\n- ),\n- (\n- [1],\n- True,\n- 1.1, #\n- ValueError,\n- \"`delta`= 1.1, must be <= 1.0.\",\n- ),\n-]\n-\n-\[email protected](\n- \"taus, use_bias_upper_bound, delta, err, description\",\n- invalid_input_of_switch_dr_tuning_init,\n-)\n-def test_switch_dr_tuning_init_using_invalid_inputs(\n- taus,\n- use_bias_upper_bound,\n- delta,\n- err,\n- description,\n-):\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = SwitchDoublyRobustTuning(\n- use_bias_upper_bound=use_bias_upper_bound, delta=delta, taus=taus\n+ _ = DoublyRobustWithShrinkageTuning(\n+ use_bias_upper_bound=use_bias_upper_bound,\n+ delta=delta,\n+ lambdas=lambdas,\n)\nvalid_input_of_dr_init = [\n- (np.inf, \"infinite lambda_tau\"),\n- (3.0, \"float lambda_tau\"),\n- (2, \"integer lambda_tau\"),\n+ (np.inf, \"infinite lambda_\"),\n+ (3.0, \"float lambda_\"),\n+ (2, \"integer lambda_\"),\n]\[email 
protected](\n- \"lambda_tau, description\",\n+ \"lambda_, description\",\nvalid_input_of_dr_init,\n)\n-def test_dr_init_using_valid_input_data(lambda_tau: float, description: str) -> None:\n- _ = DoublyRobust(lambda_=lambda_tau)\n- _ = DoublyRobustWithShrinkage(lambda_=lambda_tau)\n- _ = SwitchDoublyRobust(tau=lambda_tau)\n+def test_dr_init_using_valid_input_data(lambda_: float, description: str) -> None:\n+ _ = DoublyRobust(lambda_=lambda_)\n+ _ = DoublyRobustWithShrinkage(lambda_=lambda_)\n+ _ = SwitchDoublyRobust(lambda_=lambda_)\nvalid_input_of_dr_tuning_init = [\n- ([3.0, np.inf, 100.0], \"float lambda_tau\"),\n- ([2], \"integer lambda_tau\"),\n+ ([3.0, np.inf, 100.0], \"float lambda_\"),\n+ ([2], \"integer lambda_\"),\n]\[email protected](\n- \"lambdas_taus, description\",\n+ \"lambdas, description\",\nvalid_input_of_dr_tuning_init,\n)\n-def test_dr_tuning_init_using_valid_input_data(lambdas_taus, description):\n- _ = DoublyRobustTuning(lambdas=lambdas_taus)\n+def test_dr_tuning_init_using_valid_input_data(lambdas, description):\n+ _ = DoublyRobustTuning(lambdas=lambdas)\n_ = DoublyRobustWithShrinkageTuning(\n- lambdas=lambdas_taus,\n+ lambdas=lambdas,\n)\n_ = SwitchDoublyRobustTuning(\n- taus=lambdas_taus,\n+ lambdas=lambdas,\n)\n@@ -342,11 +221,11 @@ dr_os_tuning = DoublyRobustWithShrinkageTuning(\n)\ndr_os_max = DoublyRobustWithShrinkage(lambda_=np.inf)\nsndr = SelfNormalizedDoublyRobust()\n-switch_dr_0 = SwitchDoublyRobust(tau=0.0)\n+switch_dr_0 = SwitchDoublyRobust(lambda_=0.0)\nswitch_dr_tuning = SwitchDoublyRobustTuning(\n- taus=[1, 100], estimator_name=\"switch_dr_tuning\"\n+ lambdas=[1, 100], estimator_name=\"switch_dr_tuning\"\n)\n-switch_dr_max = SwitchDoublyRobust(tau=np.inf)\n+switch_dr_max = SwitchDoublyRobust(lambda_=np.inf)\ndr_estimators = [\ndr,\n@@ -786,9 +665,9 @@ def test_dr_variants_using_valid_input_data(\ndescription: str,\n) -> None:\n# check dr variants\n- switch_dr = SwitchDoublyRobust(tau=hyperparameter)\n+ switch_dr = SwitchDoublyRobust(lambda_=hyperparameter)\nswitch_dr_tuning = SwitchDoublyRobustTuning(\n- taus=[hyperparameter, hyperparameter * 10]\n+ lambdas=[hyperparameter, hyperparameter * 10]\n)\ndr_os = DoublyRobustWithShrinkage(lambda_=hyperparameter)\ndr_os_tuning = DoublyRobustWithShrinkageTuning(\n@@ -1048,7 +927,7 @@ def test_switch_dr_using_random_evaluation_policy(\nswitch_dr_max_value = switch_dr_max.estimate_policy_value(**input_dict)\nassert (\ndm_value == switch_dr_0_value\n- ), \"SwitchDR (tau=0) should be the same as DirectMethod\"\n+ ), \"SwitchDR (lambda_=0) should be the same as DirectMethod\"\nassert (\ndr_value == switch_dr_max_value\n- ), \"SwitchDR (tau=1e10) should be the same as DoublyRobust\"\n+ ), \"SwitchDR (lambda_=1e10) should be the same as DoublyRobust\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -142,10 +142,10 @@ ope_estimators = [\nDoublyRobust(),\nDoublyRobustTuning(lambdas=[100, 1000, np.inf], estimator_name=\"dr (tuning)\"),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr (tau=1)\"),\n- SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ SwitchDoublyRobust(lambda_=1.0, estimator_name=\"switch-dr (lambda_=1)\"),\n+ SwitchDoublyRobust(lambda_=100.0, estimator_name=\"switch-dr (lambda_=100)\"),\nSwitchDoublyRobustTuning(\n- taus=[100, 1000, np.inf], estimator_name=\"switch-dr (tuning)\"\n+ lambdas=[100, 1000, np.inf], estimator_name=\"switch-dr (tuning)\"\n),\nDoublyRobustWithShrinkage(lambda_=1.0, estimator_name=\"dr-os (lambda=1)\"),\nDoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n@@ -249,8 +249,10 @@ def test_offline_estimation_performance(\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr (tuning)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"sndr\"]\n- assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (tau=1)\"]\n- assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (tau=100)\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (lambda_=1)\"]\n+ assert (\n+ relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (lambda_=100)\"]\n+ )\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (tuning)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr-os (lambda=1)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr-os (lambda=100)\"]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | change tau of Switch to lambda_ to make the name consistent across estimators |
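With the rename, Switch-DR uses `lambda_` as the threshold in `V_hat = E_n[ q_hat(x, pi_e) + w(x,a) * (r - q_hat(x,a)) * 1{w(x,a) <= lambda} ]`, matching the notation of clipping and shrinkage. A minimal numpy sketch of that estimator on synthetic data (no `position`/`len_list` handling), which also shows the boundary behavior the tests assert: `lambda_=0` collapses to DM and `lambda_=inf` collapses to DR.

```python
import numpy as np

rng = np.random.default_rng(0)
n, n_actions = 1000, 4

action = rng.integers(n_actions, size=n)
reward = rng.binomial(1, 0.4, size=n).astype(float)
pscore = np.full(n, 1.0 / n_actions)                 # uniform behavior policy
pi_e = rng.dirichlet(np.ones(n_actions), size=n)     # evaluation policy pi_e(a|x)
q_hat = rng.uniform(0.2, 0.6, size=(n, n_actions))   # reward-model predictions


def switch_dr(lambda_: float) -> float:
    iw = pi_e[np.arange(n), action] / pscore         # importance weight w(x,a)
    indicator = (iw <= lambda_).astype(float)        # switch to DM when the weight is large
    dm_part = (q_hat * pi_e).sum(axis=1)             # q_hat(x, pi_e)
    dr_correction = iw * indicator * (reward - q_hat[np.arange(n), action])
    return float((dm_part + dr_correction).mean())


# lambda_=0 reduces to DM and lambda_=inf reduces to DR, as the tests check
print(switch_dr(0.0), switch_dr(np.inf))
```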
641,014 | 03.09.2021 18:56:09 | 14,400 | 175b70d7073a8fa05bc22a3dc1e0221ba9ce2527 | adjust to the change of the name of the argument | [
{
"change_type": "MODIFY",
"old_path": "examples/multiclass/README.md",
"new_path": "examples/multiclass/README.md",
"diff": "@@ -76,8 +76,8 @@ python evaluate_off_policy_estimators.py\\\n# snipw 0.006797 0.004094\n# dr 0.007780 0.004492\n# sndr 0.007210 0.004089\n-# switch-dr (tau=1) 0.173282 0.020025\n-# switch-dr (tau=100) 0.007780 0.004492\n+# switch-dr (lambda=1) 0.173282 0.020025\n+# switch-dr (lambda=100) 0.007780 0.004492\n# dr-os (lambda=1) 0.079629 0.014008\n# dr-os (lambda=100) 0.008031 0.004634\n# =============================================\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/multiclass/evaluate_off_policy_estimators.py",
"new_path": "examples/multiclass/evaluate_off_policy_estimators.py",
"diff": "@@ -48,8 +48,8 @@ ope_estimators = [\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr (tau=1)\"),\n- SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ SwitchDoublyRobust(lambda_=1.0, estimator_name=\"switch-dr (lambda=1)\"),\n+ SwitchDoublyRobust(lambda_=100.0, estimator_name=\"switch-dr (lambda=100)\"),\nDoublyRobustWithShrinkage(lambda_=1.0, estimator_name=\"dr-os (lambda=1)\"),\nDoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/synthetic/README.md",
"new_path": "examples/synthetic/README.md",
"diff": "@@ -69,8 +69,8 @@ python evaluate_off_policy_estimators.py\\\n# snipw 0.007543 0.005196\n# dr 0.008099 0.006659\n# sndr 0.008054 0.004911\n-# switch-dr (tau=1) 0.195878 0.012146\n-# switch-dr (tau=100) 0.008099 0.006659\n+# switch-dr (lambda=1) 0.195878 0.012146\n+# switch-dr (lambda=100) 0.008099 0.006659\n# dr-os (lambda=1) 0.195642 0.012151\n# dr-os (lambda=100) 0.175285 0.012801\n# =============================================\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/synthetic/evaluate_off_policy_estimators.py",
"new_path": "examples/synthetic/evaluate_off_policy_estimators.py",
"diff": "@@ -45,8 +45,8 @@ ope_estimators = [\nSelfNormalizedInverseProbabilityWeighting(),\nDoublyRobust(),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(tau=1.0, estimator_name=\"switch-dr (tau=1)\"),\n- SwitchDoublyRobust(tau=100.0, estimator_name=\"switch-dr (tau=100)\"),\n+ SwitchDoublyRobust(lambda_=1.0, estimator_name=\"switch-dr (lambda=1)\"),\n+ SwitchDoublyRobust(lambda_=100.0, estimator_name=\"switch-dr (lambda=100)\"),\nDoublyRobustWithShrinkage(lambda_=1.0, estimator_name=\"dr-os (lambda=1)\"),\nDoublyRobustWithShrinkage(lambda_=100.0, estimator_name=\"dr-os (lambda=100)\"),\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -927,7 +927,7 @@ def test_switch_dr_using_random_evaluation_policy(\nswitch_dr_max_value = switch_dr_max.estimate_policy_value(**input_dict)\nassert (\ndm_value == switch_dr_0_value\n- ), \"SwitchDR (lambda_=0) should be the same as DirectMethod\"\n+ ), \"SwitchDR (lambda=0) should be the same as DirectMethod\"\nassert (\ndr_value == switch_dr_max_value\n- ), \"SwitchDR (lambda_=1e10) should be the same as DoublyRobust\"\n+ ), \"SwitchDR (lambda=1e10) should be the same as DoublyRobust\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -142,8 +142,8 @@ ope_estimators = [\nDoublyRobust(),\nDoublyRobustTuning(lambdas=[100, 1000, np.inf], estimator_name=\"dr (tuning)\"),\nSelfNormalizedDoublyRobust(),\n- SwitchDoublyRobust(lambda_=1.0, estimator_name=\"switch-dr (lambda_=1)\"),\n- SwitchDoublyRobust(lambda_=100.0, estimator_name=\"switch-dr (lambda_=100)\"),\n+ SwitchDoublyRobust(lambda_=1.0, estimator_name=\"switch-dr (lambda=1)\"),\n+ SwitchDoublyRobust(lambda_=100.0, estimator_name=\"switch-dr (lambda=100)\"),\nSwitchDoublyRobustTuning(\nlambdas=[100, 1000, np.inf], estimator_name=\"switch-dr (tuning)\"\n),\n@@ -249,10 +249,8 @@ def test_offline_estimation_performance(\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr (tuning)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"sndr\"]\n- assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (lambda_=1)\"]\n- assert (\n- relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (lambda_=100)\"]\n- )\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (lambda=1)\"]\n+ assert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (lambda=100)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"switch-dr (tuning)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr-os (lambda=1)\"]\nassert relative_ee_df_mean[\"random\"] > relative_ee_df_mean[\"dr-os (lambda=100)\"]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | adjust to the change of the name of the argument |
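This record only updates call sites. Based on the calls shown in these diffs, constructing the estimators after the rename looks as follows (assumes a version of obp that already includes the previous commit):

```python
import numpy as np
from obp.ope import SwitchDoublyRobust, SwitchDoublyRobustTuning

# the switching threshold is now `lambda_` / `lambdas` everywhere,
# consistent with clipping and shrinkage
switch_dr = SwitchDoublyRobust(lambda_=100.0, estimator_name="switch-dr (lambda=100)")
switch_dr_tuning = SwitchDoublyRobustTuning(
    lambdas=[100, 1000, np.inf], estimator_name="switch-dr (tuning)"
)
```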
641,014 | 03.09.2021 20:07:39 | 14,400 | e488b0e8033f29221a203069ad7a26bf6f11c302 | adjust the benchmark code to the change in hyperparam name | [
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/README.md",
"new_path": "benchmark/ope/README.md",
"diff": "@@ -90,7 +90,7 @@ It is possible to run multiple experimental settings easily by using the `--mult\nFor example, the following script sweeps over all simulations including the three campaigns ('all', 'men', and 'women') and two different behavior policies ('random' and 'bts').\n```bash\n-poetry run python benchmark_ope_estimators.py setting.campaign=all,men,women setting.behavior_policy=random.bts --multirun\n+poetry run python benchmark_ope_estimators.py setting.campaign=all,men,women setting.behavior_policy=random,bts --multirun\n```\nThe experimental results (including the pairwise hypothesis test results) will be store in the `logs/` directory.\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/benchmark_ope_estimators.py",
"new_path": "benchmark/ope/benchmark_ope_estimators.py",
"diff": "@@ -45,14 +45,13 @@ def main(cfg: DictConfig) -> None:\n# compared ope estimators\nlambdas = list(dict(cfg.estimator_hyperparams)[\"lambdas\"])\n- taus = list(dict(cfg.estimator_hyperparams)[\"taus\"])\nope_estimators = [\nInverseProbabilityWeighting(estimator_name=\"IPW\"),\nSelfNormalizedInverseProbabilityWeighting(estimator_name=\"SNIPW\"),\nDirectMethod(estimator_name=\"DM\"),\nDoublyRobust(estimator_name=\"DR\"),\nSelfNormalizedDoublyRobust(estimator_name=\"SNDR\"),\n- SwitchDoublyRobustTuning(taus=taus, estimator_name=\"Switch-DR\"),\n+ SwitchDoublyRobustTuning(lambdas=lambdas, estimator_name=\"Switch-DR\"),\nDoublyRobustWithShrinkageTuning(lambdas=lambdas, estimator_name=\"DRos\"),\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/conf/estimator_hyperparams/default.yaml",
"new_path": "benchmark/ope/conf/estimator_hyperparams/default.yaml",
"diff": "@@ -10,13 +10,3 @@ lambdas:\n- 1000\n- 5000\n- 10000\n-taus:\n- - 1\n- - 5\n- - 10\n- - 50\n- - 100\n- - 500\n- - 1000\n- - 5000\n- - 10000\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | adjust the benchmark code to the change in hyperparam name |
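Two small things happen here: the README's multirun example needed comma-separated sweep values (`setting.behavior_policy=random,bts`), and the benchmark script now feeds one shared `lambdas` list from the Hydra config to both Switch-DR and DRos, so the separate `taus` block is dropped from the YAML. A sketch of that extraction using OmegaConf directly, with the YAML inlined instead of read from `conf/estimator_hyperparams/default.yaml`:

```python
from omegaconf import OmegaConf

# inline stand-in for conf/estimator_hyperparams/default.yaml after this commit
cfg = OmegaConf.create(
    """
    estimator_hyperparams:
      lambdas:
        - 1
        - 5
        - 10
        - 50
        - 100
        - 500
        - 1000
        - 5000
        - 10000
    """
)

# same extraction as the benchmark script: one shared candidate list
# for both Switch-DR and DRos now that `taus` is gone
lambdas = list(dict(cfg.estimator_hyperparams)["lambdas"])
print(lambdas)
```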
641,014 | 04.09.2021 14:31:53 | 14,400 | 4ed27edf8c09cb562eabe92f209195454355f5c1 | modify defaults and fix bugs | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -417,7 +417,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nlearning_rate_init: int, default=0.0001\nInitial learning rate for SGD, Adagrad, and Adam.\n- max_iter: int, default=200\n+ max_iter: int, default=100\nNumber of epochs for SGD, Adagrad, and Adam.\nshuffle: bool, default=True\n@@ -487,7 +487,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\nlearning_rate_init: float = 0.0001\n- max_iter: int = 200\n+ max_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\ntol: float = 1e-4\n@@ -711,9 +711,9 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n\"\"\"\nif self.batch_size == \"auto\":\n- self.batch_size_ = min(200, context.shape[0])\n+ batch_size_ = min(200, context.shape[0])\nelif isinstance(self.batch_size, int) and self.batch_size > 0:\n- self.batch_size_ = self.batch_size\n+ batch_size_ = self.batch_size\nelse:\nraise ValueError(\"batch_size must be a positive integer or 'auto'\")\n@@ -738,12 +738,12 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n)\ntraining_data_loader = torch.utils.data.DataLoader(\ntraining_dataset,\n- batch_size=self.batch_size_,\n+ batch_size=batch_size_,\nshuffle=self.shuffle,\n)\nvalidation_data_loader = torch.utils.data.DataLoader(\nvalidation_dataset,\n- batch_size=self.batch_size_,\n+ batch_size=batch_size_,\nshuffle=self.shuffle,\n)\n@@ -751,7 +751,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\ndata_loader = torch.utils.data.DataLoader(\ndataset,\n- batch_size=self.batch_size_,\n+ batch_size=batch_size_,\nshuffle=self.shuffle,\n)\n@@ -969,21 +969,20 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n)\nelif self.off_policy_objective == \"ipw\":\n- idx_tensor = torch.arange(self.batch_size_, dtype=torch.long)\n+ n_rounds = action.shape[0]\n+ idx_tensor = torch.arange(n_rounds, dtype=torch.long)\niw = action_dist[idx_tensor, action, 0] / pscore\nbaseline = reward.mean()\nestimated_policy_value_arr = iw * (reward - baseline)\nelif self.off_policy_objective == \"dr\":\n- idx_tensor = torch.arange(self.batch_size_, dtype=torch.long)\n+ idx_tensor = torch.arange(n_rounds, dtype=torch.long)\niw = action_dist[idx_tensor, action, 0] / pscore\nq_hat_baseline = self.q_func_estimator.predict(\ncontext=context,\naction_dist=action_dist,\n)\n- action_dist_ = torch.zeros(\n- (self.batch_size_, self.n_actions, self.len_list)\n- )\n+ action_dist_ = torch.zeros((n_rounds, self.n_actions, self.len_list))\naction_dist_[idx_tensor, action, 0] = 1\nq_hat_actions = self.q_func_estimator.predict(\ncontext=context,\n@@ -1014,7 +1013,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nAction choice probabilities of evaluation policy (must be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n\"\"\"\n- idx_tensor = torch.arange(self.batch_size_, dtype=torch.long)\n+ idx_tensor = torch.arange(action.shape[0], dtype=torch.long)\niw = action_dist[idx_tensor, action, 0] / pscore\nreturn torch.log(iw.mean())\n@@ -1180,7 +1179,7 @@ class QFuncEstimator:\nlearning_rate_init: int, default=0.0001\nInitial learning rate for SGD, Adagrad, and Adam.\n- max_iter: int, default=200\n+ max_iter: int, default=100\nMaximum number of iterations for L-BFGS.\nNumber of epochs for SGD, Adagrad, and Adam.\n@@ -1246,7 +1245,7 @@ class QFuncEstimator:\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\nlearning_rate_init: float = 0.0001\n- max_iter: int = 200\n+ max_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\ntol: float = 1e-4\n@@ 
-1415,9 +1414,9 @@ class QFuncEstimator:\n\"\"\"\nif self.batch_size == \"auto\":\n- self.batch_size_ = min(200, context.shape[0])\n+ batch_size_ = min(200, context.shape[0])\nelif isinstance(self.batch_size, int) and self.batch_size > 0:\n- self.batch_size_ = self.batch_size\n+ batch_size_ = self.batch_size\nelse:\nraise ValueError(\"batch_size must be a positive integer or 'auto'\")\n@@ -1441,12 +1440,12 @@ class QFuncEstimator:\n)\ntraining_data_loader = torch.utils.data.DataLoader(\ntraining_dataset,\n- batch_size=self.batch_size_,\n+ batch_size=batch_size_,\nshuffle=self.shuffle,\n)\nvalidation_data_loader = torch.utils.data.DataLoader(\nvalidation_dataset,\n- batch_size=self.batch_size_,\n+ batch_size=batch_size_,\nshuffle=self.shuffle,\n)\n@@ -1454,7 +1453,7 @@ class QFuncEstimator:\ndata_loader = torch.utils.data.DataLoader(\ndataset,\n- batch_size=self.batch_size_,\n+ batch_size=batch_size_,\nshuffle=self.shuffle,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline_continuous.py",
"new_path": "obp/policy/offline_continuous.py",
"diff": "@@ -81,7 +81,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nlearning_rate_init: int, default=0.0001\nInitial learning rate for SGD, Adagrad, and Adam.\n- max_iter: int, default=200\n+ max_iter: int, default=100\nNumber of epochs for SGD, Adagrad, and Adam.\nshuffle: bool, default=True\n@@ -157,7 +157,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\nlearning_rate_init: float = 0.0001\n- max_iter: int = 200\n+ max_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\ntol: float = 1e-4\n@@ -696,7 +696,7 @@ class QFuncEstimatorForContinuousAction:\nlearning_rate_init: int, default=0.0001\nInitial learning rate for SGD, Adagrad, and Adam.\n- max_iter: int, default=200\n+ max_iter: int, default=100\nNumber of epochs for SGD, Adagrad, and Adam.\nshuffle: bool, default=True\n@@ -766,7 +766,7 @@ class QFuncEstimatorForContinuousAction:\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\nlearning_rate_init: float = 0.0001\n- max_iter: int = 200\n+ max_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\ntol: float = 1e-4\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | modify defaults and fix bugs |
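The most consequential change in the offline.py diff above is that the index tensor used to pick each round's chosen-action probability is now built from the actual minibatch size (`action.shape[0]`) instead of the stored `batch_size_`, which matters because the last minibatch of an epoch can be smaller than the configured batch size. Below is a minimal PyTorch sketch of that gather-and-reweight step for the `ipw` objective; the tensors are illustrative dummies, not values taken from the library.

```python
import torch

# illustrative minibatch: 3 rounds, 4 actions, slate length 1
action_dist = torch.softmax(torch.randn(3, 4, 1), dim=1)  # evaluation policy pi_e(a|x)
action = torch.tensor([2, 0, 3])                          # logged actions
reward = torch.tensor([1.0, 0.0, 1.0])
pscore = torch.tensor([0.25, 0.25, 0.25])                 # behavior policy propensities

# size the index tensor by the rounds actually present in this minibatch;
# the last minibatch of an epoch can be smaller than the configured batch size
n_rounds = action.shape[0]
idx_tensor = torch.arange(n_rounds, dtype=torch.long)

iw = action_dist[idx_tensor, action, 0] / pscore  # importance weights
baseline = reward.mean()                          # simple variance-reduction baseline
ipw_objective = (iw * (reward - baseline)).mean()
print(ipw_objective)
```

Per the diff, `iw * (reward - baseline)` is the quantity the learner averages for the `ipw` setting; taking its mean here is just the sketch's stand-in for that aggregation.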
641,006 | 05.09.2021 15:42:58 | -32,400 | 97cadef8234ffcee3d73365a84c67f6d400264b6 | fix multiclass.py | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/multiclass.py",
"new_path": "obp/dataset/multiclass.py",
"diff": "@@ -9,7 +9,7 @@ import numpy as np\nfrom scipy.stats import rankdata\nfrom sklearn.base import ClassifierMixin, is_classifier, clone\nfrom sklearn.model_selection import train_test_split\n-from sklearn.utils import check_random_state, check_X_y\n+from sklearn.utils import check_random_state, check_X_y, check_scalar\nfrom .base import BaseBanditDataset\nfrom ..types import BanditFeedback\n@@ -152,10 +152,9 @@ class MultiClassToBanditReduction(BaseBanditDataset):\n\"\"\"Initialize Class.\"\"\"\nif not is_classifier(self.base_classifier_b):\nraise ValueError(\"base_classifier_b must be a classifier\")\n- if not isinstance(self.alpha_b, float) or not (0.0 <= self.alpha_b < 1.0):\n- raise ValueError(\n- f\"alpha_b must be a float in the [0,1) interval, but {self.alpha_b} is given\"\n- )\n+ check_scalar(self.alpha_b, \"alpha_b\", float, min_val=0.0)\n+ if self.alpha_b >= 1.0:\n+ raise ValueError(f\"`alpha_b`= {self.alpha_b}, must be < 1.0.\")\nself.X, y = check_X_y(X=self.X, y=self.y, ensure_2d=True, multi_output=False)\nself.y = (rankdata(y, \"dense\") - 1).astype(int) # re-index action\n@@ -280,10 +279,7 @@ class MultiClassToBanditReduction(BaseBanditDataset):\naxis 2 represents the length of list; it is always 1 in the current implementation.\n\"\"\"\n- if not isinstance(alpha_e, float) or not (0.0 <= alpha_e <= 1.0):\n- raise ValueError(\n- f\"alpha_e must be a float in the [0,1] interval, but {alpha_e} is given\"\n- )\n+ check_scalar(alpha_e, \"alpha_e\", float, min_val=0.0, max_val=1.0)\n# train a base ML classifier\nif base_classifier_e is None:\nbase_clf_e = clone(self.base_classifier_b)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix multiclass.py |
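This commit, and most of the ones that follow, swap hand-rolled `isinstance`/range checks for `sklearn.utils.check_scalar`, which raises `TypeError` when the value has the wrong type and `ValueError` when it falls outside the allowed range; that distinction is why several tests later in this series change `pytest.raises(ValueError)` to `pytest.raises(TypeError)`. A small self-contained sketch of the behavior follows (exact message wording varies across scikit-learn versions):

```python
from sklearn.utils import check_scalar

# passes silently: right type and within the allowed range
check_scalar(0.3, "alpha_b", float, min_val=0.0)

try:
    check_scalar("0.3", "alpha_b", float, min_val=0.0)  # wrong type
except TypeError as e:
    print(e)  # e.g. alpha_b must be an instance of <class 'float'>, ...

try:
    check_scalar(-0.1, "alpha_b", float, min_val=0.0)  # out of range
except ValueError as e:
    print(e)  # e.g. alpha_b = -0.1, must be >= 0.0
```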
641,006 | 05.09.2021 15:43:24 | -32,400 | 1860af79db6d3672b6270658a86796be3bfbf1f4 | fix synthetic.py and the test | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -146,14 +146,8 @@ class SyntheticBanditDataset(BaseBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n- raise ValueError(\n- f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- )\n- if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n- raise ValueError(\n- f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- )\n+ check_scalar(self.n_actions, \"n_actions\", int, min_val=2)\n+ check_scalar(self.dim_context, \"dim_context\", int, min_val=1)\nif RewardType(self.reward_type) not in [\nRewardType.BINARY,\nRewardType.CONTINUOUS,\n@@ -164,7 +158,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\ncheck_scalar(self.reward_std, \"reward_std\", (int, float), min_val=0)\ncheck_scalar(self.tau, \"tau\", (int, float), min_val=0)\nif self.random_state is None:\n- raise ValueError(\"random_state must be given\")\n+ raise ValueError(\"`random_state` must be given\")\nself.random_ = check_random_state(self.random_state)\nif self.reward_function is None:\nself.expected_reward = self.sample_contextfree_expected_reward()\n@@ -268,11 +262,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nGenerated synthetic bandit feedback dataset.\n\"\"\"\n- if not isinstance(n_rounds, int) or n_rounds <= 0:\n- raise ValueError(\n- f\"n_rounds must be a positive integer, but {n_rounds} is given\"\n- )\n-\n+ check_scalar(n_rounds, \"n_rounds\", int, min_val=1)\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\nif self.behavior_policy_function is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -15,14 +15,14 @@ def test_synthetic_init():\nwith pytest.raises(ValueError):\nSyntheticBanditDataset(n_actions=1)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nSyntheticBanditDataset(n_actions=\"3\")\n# dim_context\nwith pytest.raises(ValueError):\nSyntheticBanditDataset(n_actions=2, dim_context=0)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nSyntheticBanditDataset(n_actions=2, dim_context=\"2\")\n# reward_type\n@@ -129,7 +129,7 @@ def test_synthetic_obtain_batch_bandit_feedback():\ndataset = SyntheticBanditDataset(n_actions=2)\ndataset.obtain_batch_bandit_feedback(n_rounds=0)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\ndataset = SyntheticBanditDataset(n_actions=2)\ndataset.obtain_batch_bandit_feedback(n_rounds=\"3\")\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix synthetic.py and the test |
641,006 | 05.09.2021 16:00:33 | -32,400 | 60da3817848c2b2f18af48a4ecc19b2b347e967c | fix synthetic_slate.py and the test | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -190,24 +190,14 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.n_unique_action, int) or self.n_unique_action <= 1:\n- raise ValueError(\n- f\"n_unique_action must be an integer larger than 1, but {self.n_unique_action} is given\"\n- )\n- if not isinstance(self.len_list, int) or self.len_list <= 1:\n- raise ValueError(\n- f\"len_list must be an integer larger than 1, but {self.len_list} is given\"\n- )\n- if not self.is_factorizable and self.len_list > self.n_unique_action:\n- raise ValueError(\n- f\"len_list must be equal to or smaller than n_unique_action, but {self.len_list} is given\"\n- )\n- if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n- raise ValueError(\n- f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- )\n- if not isinstance(self.random_state, int):\n- raise ValueError(\"random_state must be an integer\")\n+ check_scalar(self.n_unique_action, \"n_unique_action\", int, min_val=2)\n+ if self.is_factorizable:\n+ max_len_list = None\n+ else:\n+ max_len_list = self.n_unique_action\n+ check_scalar(self.len_list, \"len_list\", int, min_val=2, max_val=max_len_list)\n+\n+ check_scalar(self.dim_context, \"dim_context\", int, min_val=1)\nself.random_ = check_random_state(self.random_state)\nif self.reward_type not in [\n\"binary\",\n@@ -744,11 +734,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nGenerated synthetic slate bandit feedback dataset.\n\"\"\"\n- if not isinstance(n_rounds, int) or n_rounds <= 0:\n- raise ValueError(\n- f\"n_rounds must be a positive integer, but {n_rounds} is given\"\n- )\n-\n+ check_scalar(n_rounds, \"n_rounds\", int, min_val=1)\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n# sample actions for each round based on the behavior policy\nif self.behavior_policy_function is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_slate.py",
"new_path": "tests/dataset/test_synthetic_slate.py",
"diff": "@@ -25,8 +25,8 @@ invalid_input_of_init = [\n\"pbm\",\n1.0,\n1,\n- ValueError,\n- \"n_unique_action must be an integer larger than 1\",\n+ TypeError,\n+ \"`n_unique_action` must be an instance of <class 'int'>, not <class 'str'>.\",\n),\n(\n1,\n@@ -39,7 +39,7 @@ invalid_input_of_init = [\n1.0,\n1,\nValueError,\n- \"n_unique_action must be an integer larger than 1\",\n+ \"`n_unique_action`= 1, must be >= 2.\",\n),\n(\n5,\n@@ -51,8 +51,8 @@ invalid_input_of_init = [\n\"pbm\",\n1.0,\n1,\n- ValueError,\n- \"len_list must be an integer larger than\",\n+ TypeError,\n+ \"`len_list` must be an instance of <class 'int'>, not <class 'str'>.\",\n),\n(\n5,\n@@ -65,7 +65,7 @@ invalid_input_of_init = [\n1.0,\n1,\nValueError,\n- \"len_list must be an integer larger than\",\n+ \"`len_list`= -1, must be >= 2.\",\n),\n(\n5,\n@@ -78,7 +78,7 @@ invalid_input_of_init = [\n1.0,\n1,\nValueError,\n- \"len_list must be equal to or smaller than\",\n+ \"`len_list`= 10, must be <= 5.\",\n),\n(\n5,\n@@ -91,7 +91,7 @@ invalid_input_of_init = [\n1.0,\n1,\nValueError,\n- \"dim_context must be a positive integer\",\n+ \"`dim_context`= 0, must be >= 1.\",\n),\n(\n5,\n@@ -103,8 +103,8 @@ invalid_input_of_init = [\n\"pbm\",\n1.0,\n1,\n- ValueError,\n- \"dim_context must be a positive integer\",\n+ TypeError,\n+ \"`dim_context` must be an instance of <class 'int'>, not <class 'str'>.\",\n),\n(\n5,\n@@ -195,20 +195,7 @@ invalid_input_of_init = [\n1.0,\n\"x\",\nValueError,\n- \"random_state must be an integer\",\n- ),\n- (\n- 5,\n- 3,\n- 2,\n- \"binary\",\n- \"independent\",\n- \"exponential\",\n- \"pbm\",\n- 1.0,\n- None,\n- ValueError,\n- \"random_state must be an integer\",\n+ \"'x' cannot be used to seed a numpy.random.RandomState instance\",\n),\n]\n@@ -464,7 +451,7 @@ def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_poli\n)\nwith pytest.raises(ValueError):\n_ = dataset.obtain_batch_bandit_feedback(n_rounds=-1)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\n_ = dataset.obtain_batch_bandit_feedback(n_rounds=\"a\")\n# obtain feedback\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix synthetic_slate.py and the test |
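One detail of the slate-dataset diff above worth spelling out: the upper bound on `len_list` is enforced only for non-factorizable policies (a factorizable policy samples each slot independently, so a slate may repeat actions and exceed `n_unique_action`), and passing `max_val=None` to `check_scalar` simply skips the upper-bound check. A compact sketch of that conditional bound, with made-up values:

```python
from sklearn.utils import check_scalar

def validate_len_list(len_list: int, n_unique_action: int, is_factorizable: bool) -> None:
    # factorizable policies may repeat actions across slots, so no upper bound
    max_len_list = None if is_factorizable else n_unique_action
    check_scalar(len_list, "len_list", int, min_val=2, max_val=max_len_list)

validate_len_list(3, n_unique_action=5, is_factorizable=False)  # ok
validate_len_list(8, n_unique_action=5, is_factorizable=True)   # ok: no upper bound applied
try:
    validate_len_list(8, n_unique_action=5, is_factorizable=False)
except ValueError as e:
    print(e)  # e.g. len_list = 8, must be <= 5.
```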
641,006 | 05.09.2021 17:01:08 | -32,400 | 8578e0c4930fc57edf45d635b2e8cda81bb387a4 | add in error message | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_continuous.py",
"new_path": "obp/dataset/synthetic_continuous.py",
"diff": "@@ -243,7 +243,7 @@ class SyntheticContinuousBanditDataset(BaseBanditDataset):\ncheck_array(array=action, name=\"action\", expected_dim=1)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n- \"Expected `context.shape[1] == self.dim_context`, found it False\"\n+ \"Expected `context.shape[1] == self.dim_context`, but found it False\"\n)\nif context.shape[0] != action.shape[0]:\nraise ValueError(\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -877,7 +877,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\n)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n- \"Expected `context.shape[1] == self.dim_context`, found it False\"\n+ \"Expected `context.shape[1] == self.dim_context`, but found it False\"\n)\nif evaluation_policy_logit_.shape[0] != context.shape[0]:\nraise ValueError(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add in error message |
641,006 | 05.09.2021 17:51:49 | -32,400 | f8a4832440bcf5a64299833b6e8fbcc7f4d23d67 | fix contextfree check-scalar | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/base.py",
"new_path": "obp/policy/base.py",
"diff": "@@ -7,7 +7,7 @@ from dataclasses import dataclass\nfrom typing import Optional\nimport numpy as np\n-from sklearn.utils import check_random_state\n+from sklearn.utils import check_random_state, check_scalar\nfrom .policy_type import PolicyType\n@@ -40,26 +40,9 @@ class BaseContextFreePolicy(metaclass=ABCMeta):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n- raise ValueError(\n- f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- )\n-\n- if not isinstance(self.len_list, int) or self.len_list <= 0:\n- raise ValueError(\n- f\"len_list must be a positive integer, but {self.len_list} is given\"\n- )\n-\n- if not isinstance(self.batch_size, int) or self.batch_size <= 0:\n- raise ValueError(\n- f\"batch_size must be a positive integer, but {self.batch_size} is given\"\n- )\n-\n- if self.n_actions < self.len_list:\n- raise ValueError(\n- f\"n_actions >= len_list should hold, but n_actions is {self.n_actions} and len_list is {self.len_list}\"\n- )\n-\n+ check_scalar(self.n_actions, \"n_actions\", int, min_val=2)\n+ check_scalar(self.len_list, \"len_list\", int, min_val=1, max_val=self.n_actions)\n+ check_scalar(self.batch_size, \"batch_size\", int, min_val=1)\nself.n_trial = 0\nself.random_ = check_random_state(self.random_state)\nself.action_counts = np.zeros(self.n_actions, dtype=int)\n@@ -124,29 +107,10 @@ class BaseContextualPolicy(metaclass=ABCMeta):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not isinstance(self.dim, int) or self.dim <= 0:\n- raise ValueError(f\"dim must be a positive integer, but {self.dim} is given\")\n-\n- if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n- raise ValueError(\n- f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- )\n-\n- if not isinstance(self.len_list, int) or self.len_list <= 0:\n- raise ValueError(\n- f\"len_list must be a positive integer, but {self.len_list} is given\"\n- )\n-\n- if not isinstance(self.batch_size, int) or self.batch_size <= 0:\n- raise ValueError(\n- f\"batch_size must be a positive integer, but {self.batch_size} is given\"\n- )\n-\n- if self.n_actions < self.len_list:\n- raise ValueError(\n- f\"n_actions >= len_list should hold, but n_actions is {self.n_actions} and len_list is {self.len_list}\"\n- )\n-\n+ check_scalar(self.dim, \"dim\", int, min_val=1)\n+ check_scalar(self.n_actions, \"n_actions\", int, min_val=2)\n+ check_scalar(self.len_list, \"len_list\", int, min_val=1, max_val=self.n_actions)\n+ check_scalar(self.batch_size, \"batch_size\", int, min_val=1)\nself.n_trial = 0\nself.random_ = check_random_state(self.random_state)\nself.action_counts = np.zeros(self.n_actions, dtype=int)\n@@ -197,20 +161,8 @@ class BaseOfflinePolicyLearner(metaclass=ABCMeta):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not isinstance(self.n_actions, int) or self.n_actions <= 1:\n- raise ValueError(\n- f\"n_actions must be an integer larger than 1, but {self.n_actions} is given\"\n- )\n-\n- if not isinstance(self.len_list, int) or self.len_list <= 0:\n- raise ValueError(\n- f\"len_list must be a positive integer, but {self.len_list} is given\"\n- )\n-\n- if self.n_actions < self.len_list:\n- raise ValueError(\n- f\"Expected `n_actions >= len_list`, but got n_actions={self.n_actions} < len_list={self.len_list}\"\n- )\n+ check_scalar(self.n_actions, \"n_actions\", int, min_val=2)\n+ check_scalar(self.len_list, \"len_list\", int, 
min_val=1, max_val=self.n_actions)\n@property\ndef policy_type(self) -> PolicyType:\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/contextfree.py",
"new_path": "obp/policy/contextfree.py",
"diff": "@@ -10,6 +10,7 @@ from dataclasses import dataclass\nfrom typing import Optional\nimport numpy as np\n+from sklearn.utils import check_scalar\nfrom .base import BaseContextFreePolicy\n@@ -51,10 +52,7 @@ class EpsilonGreedy(BaseContextFreePolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n- if not 0 <= self.epsilon <= 1:\n- raise ValueError(\n- f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n- )\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0, max_val=1.0)\nself.policy_name = f\"egreedy_{self.epsilon}\"\nsuper().__post_init__()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_contextfree.py",
"new_path": "tests/policy/test_contextfree.py",
"diff": "@@ -12,21 +12,21 @@ def test_contextfree_base_exception():\nwith pytest.raises(ValueError):\nEpsilonGreedy(n_actions=0)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nEpsilonGreedy(n_actions=\"3\")\n# invalid len_list\nwith pytest.raises(ValueError):\nEpsilonGreedy(n_actions=2, len_list=-1)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nEpsilonGreedy(n_actions=2, len_list=\"5\")\n# invalid batch_size\nwith pytest.raises(ValueError):\nEpsilonGreedy(n_actions=2, batch_size=-3)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nEpsilonGreedy(n_actions=2, batch_size=\"3\")\n# invalid relationship between n_actions and len_list\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix contextfree check-scalar |
641,006 | 05.09.2021 17:52:59 | -32,400 | 7d8d0ceaa389876b7c7415318825d947fad269f7 | fix linear and logistic check-scalar | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/linear.py",
"new_path": "obp/policy/linear.py",
"diff": "from dataclasses import dataclass\nimport numpy as np\n+from sklearn.utils import check_scalar\nfrom .base import BaseContextualPolicy\n+from ..utils import check_array\n@dataclass\n@@ -123,10 +125,7 @@ class LinEpsilonGreedy(BaseLinPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not 0 <= self.epsilon <= 1:\n- raise ValueError(\n- f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n- )\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0, max_val=1.0)\nself.policy_name = f\"linear_epsilon_greedy_{self.epsilon}\"\nsuper().__post_init__()\n@@ -145,10 +144,9 @@ class LinEpsilonGreedy(BaseLinPolicy):\nList of selected actions.\n\"\"\"\n- if context.ndim != 2 or context.shape[0] != 1:\n- raise ValueError(\n- f\"context shape must be (1, dim_context),but {context.shape} is given\"\n- )\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ if context.shape[0] != 1:\n+ raise ValueError(\"Expected `context.shape[1] == 1`, but found it False\")\nif self.random_.rand() > self.epsilon:\nself.theta_hat = np.concatenate(\n@@ -189,7 +187,7 @@ class LinUCB(BaseLinPolicy):\nControls the random seed in sampling actions.\nepsilon: float, default=0.\n- Exploration hyperparameter that must take value in the range of [0., 1.].\n+ Exploration hyperparameter that must be greater than or equal to 0.0.\nReferences\n--------------\n@@ -203,10 +201,7 @@ class LinUCB(BaseLinPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if self.epsilon < 0:\n- raise ValueError(\n- f\"epsilon must be positive scalar, but {self.epsilon} is given\"\n- )\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0)\nself.policy_name = f\"linear_ucb_{self.epsilon}\"\nsuper().__post_init__()\n@@ -225,10 +220,10 @@ class LinUCB(BaseLinPolicy):\nList of selected actions.\n\"\"\"\n- if context.ndim != 2 or context.shape[0] != 1:\n- raise ValueError(\n- f\"context shape must be (1, dim_context),but {context.shape} is given\"\n- )\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ if context.shape[0] != 1:\n+ raise ValueError(\"Expected `context.shape[1] == 1`, but found it False\")\n+\nself.theta_hat = np.concatenate(\n[\nself.A_inv[i] @ self.b[:, i][:, np.newaxis]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/logistic.py",
"new_path": "obp/policy/logistic.py",
"diff": "@@ -6,7 +6,7 @@ from dataclasses import dataclass\nfrom typing import Optional\nimport numpy as np\n-from sklearn.utils import check_random_state\n+from sklearn.utils import check_random_state, check_scalar\nfrom scipy.optimize import minimize\nfrom .base import BaseContextualPolicy\n@@ -49,15 +49,13 @@ class BaseLogisticPolicy(BaseContextualPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\nsuper().__post_init__()\n- if not isinstance(self.alpha_, float) or self.alpha_ <= 0.0:\n- raise ValueError(\n- f\"alpha_ should be a positive float, but {self.alpha_} is given\"\n- )\n+ check_scalar(self.alpha_, \"alpha_\", float)\n+ if self.alpha_ <= 0.0:\n+ raise ValueError(f\"`alpha_`= {self.alpha_}, must be > 0.0.\")\n- if not isinstance(self.lambda_, float) or self.lambda_ <= 0.0:\n- raise ValueError(\n- f\"lambda_ should be a positive float, but {self.lambda_} is given\"\n- )\n+ check_scalar(self.lambda_, \"lambda_\", float)\n+ if self.alpha_ <= 0.0:\n+ raise ValueError(f\"`lambda_`= {self.lambda_}, must be > 0.0.\")\nself.alpha_list = self.alpha_ * np.ones(self.n_actions)\nself.lambda_list = self.lambda_ * np.ones(self.n_actions)\n@@ -138,10 +136,7 @@ class LogisticEpsilonGreedy(BaseLogisticPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not 0 <= self.epsilon <= 1:\n- raise ValueError(\n- f\"epsilon must be between 0 and 1, but {self.epsilon} is given\"\n- )\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0, max_val=1.0)\nself.policy_name = f\"logistic_egreedy_{self.epsilon}\"\nsuper().__post_init__()\n@@ -200,7 +195,7 @@ class LogisticUCB(BaseLogisticPolicy):\nRegularization hyperparameter for the online logistic regression.\nepsilon: float, default=0.\n- Exploration hyperparameter that must take value in the range of [0., 1.].\n+ Exploration hyperparameter that must be greater than or equal to 0.0.\nReferences\n----------\n@@ -213,10 +208,7 @@ class LogisticUCB(BaseLogisticPolicy):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if self.epsilon < 0:\n- raise ValueError(\n- f\"epsilon must be positive scalar, but {self.epsilon} is given\"\n- )\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0)\nself.policy_name = f\"logistic_ucb_{self.epsilon}\"\nsuper().__post_init__()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_linear.py",
"new_path": "tests/policy/test_linear.py",
"diff": "@@ -15,7 +15,7 @@ def test_linear_base_exception():\nwith pytest.raises(ValueError):\nLinEpsilonGreedy(n_actions=2, dim=0)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nLinEpsilonGreedy(n_actions=2, dim=\"3\")\n# invalid n_actions\n@@ -25,7 +25,7 @@ def test_linear_base_exception():\nwith pytest.raises(ValueError):\nLinEpsilonGreedy(n_actions=1, dim=2)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nLinEpsilonGreedy(n_actions=\"2\", dim=2)\n# invalid len_list\n@@ -35,7 +35,7 @@ def test_linear_base_exception():\nwith pytest.raises(ValueError):\nLinEpsilonGreedy(n_actions=2, dim=2, len_list=0)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nLinEpsilonGreedy(n_actions=2, dim=2, len_list=\"3\")\n# invalid batch_size\n@@ -45,7 +45,7 @@ def test_linear_base_exception():\nwith pytest.raises(ValueError):\nLinEpsilonGreedy(n_actions=2, dim=2, batch_size=0)\n- with pytest.raises(ValueError):\n+ with pytest.raises(TypeError):\nLinEpsilonGreedy(n_actions=2, dim=2, batch_size=\"10\")\n# invalid relationship between n_actions and len_list\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix linear and logistic check-scalar |
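In the linear-policy diff above, `select_action` now validates its input in two steps: obp's `check_array` helper (called as `check_array(array=context, name="context", expected_dim=2)`) enforces a 2-dimensional ndarray, and a follow-up check enforces that exactly one round of context is passed per call. The helper below is a hedged stand-in that imitates only what the call site implies, not obp's actual implementation:

```python
import numpy as np

def validate_select_action_context(context: np.ndarray) -> None:
    # stand-in for check_array(array=context, name="context", expected_dim=2)
    if not isinstance(context, np.ndarray):
        raise ValueError("context must be ndarray")
    if context.ndim != 2:
        raise ValueError(f"context must be 2-dimensional, but is {context.ndim}-dimensional")
    # one round of context per select_action call
    if context.shape[0] != 1:
        raise ValueError("Expected a single round of context, i.e., shape (1, dim_context)")

validate_select_action_context(np.array([[0.1, -0.5, 2.0]]))  # ok: shape (1, 3)
try:
    validate_select_action_context(np.array([0.1, -0.5, 2.0]))  # 1-dimensional input
except ValueError as e:
    print(e)
```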
641,006 | 05.09.2021 17:53:13 | -32,400 | e979ca8f3d7cd538f77085f63ca88d508d435314 | fix offline check-scalar | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -484,10 +484,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nif self.len_list != 1:\nraise NotImplementedError(\"currently, len_list > 1 is not supported\")\n- if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n- raise ValueError(\n- f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- )\n+ check_scalar(self.dim_context, \"dim_context\", int, min_val=1)\nif not callable(self.off_policy_objective):\nraise ValueError(\n@@ -506,10 +503,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nf\"solver must be one of 'adam', 'lbfgs', or 'sgd', but {self.solver} is given\"\n)\n- if not isinstance(self.alpha, float) or self.alpha < 0.0:\n- raise ValueError(\n- f\"alpha must be a non-negative float, but {self.alpha} is given\"\n- )\n+ check_scalar(self.alpha, \"alpha\", float, min_val=0.0)\nif self.batch_size != \"auto\" and (\nnot isinstance(self.batch_size, int) or self.batch_size <= 0\n@@ -518,29 +512,22 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nf\"batch_size must be a positive integer or 'auto', but {self.batch_size} is given\"\n)\n- if (\n- not isinstance(self.learning_rate_init, float)\n- or self.learning_rate_init <= 0.0\n- ):\n+ check_scalar(self.learning_rate_init, \"learning_rate_init\", float)\n+ if self.learning_rate_init <= 0.0:\nraise ValueError(\n- f\"learning_rate_init must be a positive float, but {self.learning_rate_init} is given\"\n+ f\"`learning_rate_init`= {self.learning_rate_init}, must be > 0.0\"\n)\n- if not isinstance(self.max_iter, int) or self.max_iter <= 0:\n- raise ValueError(\n- f\"max_iter must be a positive integer, but {self.max_iter} is given\"\n- )\n+ check_scalar(self.max_iter, \"max_iter\", int, min_val=1)\nif not isinstance(self.shuffle, bool):\nraise ValueError(f\"shuffle must be a bool, but {self.shuffle} is given\")\n- if not isinstance(self.tol, float) or self.tol <= 0.0:\n- raise ValueError(f\"tol must be a positive float, but {self.tol} is given\")\n+ check_scalar(self.tol, \"tol\", float)\n+ if self.tol <= 0.0:\n+ raise ValueError(f\"`tol`= {self.tol}, must be > 0.0\")\n- if not isinstance(self.momentum, float) or not 0.0 <= self.momentum <= 1.0:\n- raise ValueError(\n- f\"momentum must be a float in [0., 1.], but {self.momentum} is given\"\n- )\n+ check_scalar(self.momentum, \"momentum\", float, min_val=0.0, max_val=1.0)\nif not isinstance(self.nesterovs_momentum, bool):\nraise ValueError(\n@@ -557,43 +544,19 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nf\"if early_stopping is True, solver must be one of 'sgd' or 'adam', but {self.solver} is given\"\n)\n- if (\n- not isinstance(self.validation_fraction, float)\n- or not 0.0 < self.validation_fraction <= 1.0\n- ):\n- raise ValueError(\n- f\"validation_fraction must be a float in (0., 1.], but {self.validation_fraction} is given\"\n- )\n-\n- if not isinstance(self.beta_1, float) or not 0.0 <= self.beta_1 <= 1.0:\n- raise ValueError(\n- f\"beta_1 must be a float in [0. 
1.], but {self.beta_1} is given\"\n- )\n-\n- if not isinstance(self.beta_2, float) or not 0.0 <= self.beta_2 <= 1.0:\n- raise ValueError(\n- f\"beta_2 must be a float in [0., 1.], but {self.beta_2} is given\"\n- )\n-\n- if not isinstance(self.beta_2, float) or not 0.0 <= self.beta_2 <= 1.0:\n- raise ValueError(\n- f\"beta_2 must be a float in [0., 1.], but {self.beta_2} is given\"\n- )\n-\n- if not isinstance(self.epsilon, float) or self.epsilon < 0.0:\n- raise ValueError(\n- f\"epsilon must be a non-negative float, but {self.epsilon} is given\"\n+ check_scalar(\n+ self.validation_fraction, \"validation_fraction\", float, max_val=1.0\n)\n-\n- if not isinstance(self.n_iter_no_change, int) or self.n_iter_no_change <= 0:\n+ if self.validation_fraction <= 0.0:\nraise ValueError(\n- f\"n_iter_no_change must be a positive integer, but {self.n_iter_no_change} is given\"\n+ f\"`validation_fraction`= {self.validation_fraction}, must be > 0.0\"\n)\n- if not isinstance(self.max_fun, int) or self.max_fun <= 0:\n- raise ValueError(\n- f\"max_fun must be a positive integer, but {self.max_fun} is given\"\n- )\n+ check_scalar(self.beta_1, \"beta_1\", float, min_val=0.0, max_val=1.0)\n+ check_scalar(self.beta_2, \"beta_2\", float, min_val=0.0, max_val=1.0)\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0)\n+ check_scalar(self.n_iter_no_change, \"n_iter_no_change\", int, min_val=1)\n+ check_scalar(self.max_fun, \"max_fun\", int, min_val=1)\nif self.random_state is not None:\nself.random_ = check_random_state(self.random_state)\n@@ -665,10 +628,9 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n\"\"\"\nif self.batch_size == \"auto\":\nbatch_size_ = min(200, context.shape[0])\n- elif isinstance(self.batch_size, int) and self.batch_size > 0:\n- batch_size_ = self.batch_size\nelse:\n- raise ValueError(\"batch_size must be a positive integer or 'auto'\")\n+ check_scalar(self.batch_size, \"batch_size\", int, min_val=1)\n+ batch_size_ = self.batch_size\ndataset = NNPolicyDataset(\ntorch.from_numpy(context).float(),\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline.py",
"new_path": "tests/policy/test_offline.py",
"diff": "@@ -18,19 +18,19 @@ invalid_input_of_ipw_learner_init = [\n0, #\n1,\nbase_classifier,\n- \"n_actions must be an integer larger than 1\",\n+ \"`n_actions`= 0, must be >= 1\",\n),\n(\n10,\n-1, #\nbase_classifier,\n- \"len_list must be a positive integer\",\n+ \"`len_list`= -1, must be >= 0\",\n),\n(\n10,\n20, #\nbase_classifier,\n- \"Expected `n_actions\",\n+ \"`len_list`= 20, must be <= 10\",\n),\n(10, 1, base_regressor, \"base_classifier must be a classifier\"),\n]\n@@ -255,7 +255,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"n_actions must be an integer larger than 1\",\n+ \"`n_actions`= 0, must be >= 1\",\n),\n(\n10,\n@@ -281,7 +281,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"len_list must be a positive integer\",\n+ \"`len_list`= -1, must be >= 0\",\n),\n(\n10,\n@@ -307,7 +307,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"dim_context must be a positive integer\",\n+ \"`dim_context`= -1, must be >= 0\",\n),\n(\n10,\n@@ -421,7 +421,7 @@ invalid_input_of_nn_policy_learner_init = [\n(100, 50, 100),\n\"relu\",\n\"adam\",\n- -1, #\n+ -1.0, #\n\"auto\",\n0.0001,\n200,\n@@ -437,7 +437,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"alpha must be a non-negative float\",\n+ \"`alpha`= -1.0, must be >= 0.0\",\n),\n(\n10,\n@@ -475,7 +475,7 @@ invalid_input_of_nn_policy_learner_init = [\n\"adam\",\n0.001,\n\"auto\",\n- 0, #\n+ 0.0, #\n200,\nTrue,\n123,\n@@ -489,7 +489,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"learning_rate_init must be a positive float\",\n+ \"`learning_rate_init`= 0.0, must be > 0.0\",\n),\n(\n10,\n@@ -515,7 +515,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"max_iter must be a positive integer\",\n+ \"`max_iter`= 0, must be >= 1\",\n),\n(\n10,\n@@ -583,7 +583,7 @@ invalid_input_of_nn_policy_learner_init = [\n200,\nTrue,\n123,\n- -1, #\n+ -1.0, #\n0.9,\nTrue,\nTrue,\n@@ -593,7 +593,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"tol must be a positive float\",\n+ \"`tol`= -1.0, must be > 0.0\",\n),\n(\n10,\n@@ -610,7 +610,7 @@ invalid_input_of_nn_policy_learner_init = [\nTrue,\n123,\n1e-4,\n- 2, #\n+ 2.0, #\nTrue,\nTrue,\n0.1,\n@@ -619,7 +619,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n15000,\n- \"momentum must be a float in [0., 1.]\",\n+ \"`momentum`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -717,13 +717,13 @@ invalid_input_of_nn_policy_learner_init = [\n0.9,\nTrue,\nTrue,\n- 2, #\n+ 2.0, #\n0.9,\n0.999,\n1e-8,\n10,\n15000,\n- \"validation_fraction must be a float in\",\n+ \"`validation_fraction`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -744,12 +744,12 @@ invalid_input_of_nn_policy_learner_init = [\nTrue,\nTrue,\n0.1,\n- 2, #\n+ 2.0, #\n0.999,\n1e-8,\n10,\n15000,\n- \"beta_1 must be a float in [0. 
1.]\",\n+ \"`beta_1`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -771,11 +771,11 @@ invalid_input_of_nn_policy_learner_init = [\nTrue,\n0.1,\n0.9,\n- 2, #\n+ 2.0, #\n1e-8,\n10,\n15000,\n- \"beta_2 must be a float in [0., 1.]\",\n+ \"`beta_2`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -798,10 +798,10 @@ invalid_input_of_nn_policy_learner_init = [\n0.1,\n0.9,\n0.999,\n- -1, #\n+ -1.0, #\n10,\n15000,\n- \"epsilon must be a non-negative float\",\n+ \"`epsilon`= -1.0, must be >= 0.0\",\n),\n(\n10,\n@@ -827,7 +827,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n0, #\n15000,\n- \"n_iter_no_change must be a positive integer\",\n+ \"`n_iter_no_change`= 0, must be >= 1\",\n),\n(\n10,\n@@ -853,7 +853,7 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\n0, #\n- \"max_fun must be a positive integer\",\n+ \"`max_fun`= 0, must be >= 1\",\n),\n]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix offline check-scalar |
641,006 | 05.09.2021 20:07:57 | -32,400 | 1eeb28fc644ff6160e9406d221b2649d4d5bfcad | fix offline-continuous check-scalar | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline_continuous.py",
"new_path": "obp/policy/offline_continuous.py",
"diff": "@@ -8,7 +8,7 @@ from typing import Tuple, Optional, Union, Dict\nfrom tqdm import tqdm\nimport numpy as np\n-from sklearn.utils import check_random_state\n+from sklearn.utils import check_random_state, check_scalar\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n@@ -170,10 +170,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n- raise ValueError(\n- f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- )\n+ check_scalar(self.dim_context, \"dim_context\", int, min_val=1)\nif self.pg_method not in [\"dpg\", \"ipw\", \"dr\"]:\nraise ValueError(\n@@ -181,10 +178,9 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\n)\nif self.pg_method != \"dpg\":\n- if not isinstance(self.bandwidth, (int, float)) or self.bandwidth <= 0:\n- raise ValueError(\n- f\"bandwidth must be a positive float, but {self.bandwidth} is given\"\n- )\n+ check_scalar(self.bandwidth, \"bandwidth\", (int, float))\n+ if self.bandwidth <= 0:\n+ raise ValueError(f\"`bandwidth`= {self.bandwidth}, must be > 0.\")\nif self.output_space is not None:\nif not isinstance(self.output_space, tuple) or any(\n@@ -206,10 +202,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nf\"solver must be one of 'adam', 'lbfgs', or 'sgd', but {self.solver} is given\"\n)\n- if not isinstance(self.alpha, float) or self.alpha < 0.0:\n- raise ValueError(\n- f\"alpha must be a non-negative float, but {self.alpha} is given\"\n- )\n+ check_scalar(self.alpha, \"alpha\", float, min_val=0.0)\nif self.batch_size != \"auto\" and (\nnot isinstance(self.batch_size, int) or self.batch_size <= 0\n@@ -218,29 +211,22 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nf\"batch_size must be a positive integer or 'auto', but {self.batch_size} is given\"\n)\n- if (\n- not isinstance(self.learning_rate_init, float)\n- or self.learning_rate_init <= 0.0\n- ):\n+ check_scalar(self.learning_rate_init, \"learning_rate_init\", float)\n+ if self.learning_rate_init <= 0.0:\nraise ValueError(\n- f\"learning_rate_init must be a positive float, but {self.learning_rate_init} is given\"\n+ f\"`learning_rate_init`= {self.learning_rate_init}, must be > 0.0\"\n)\n- if not isinstance(self.max_iter, int) or self.max_iter <= 0:\n- raise ValueError(\n- f\"max_iter must be a positive integer, but {self.max_iter} is given\"\n- )\n+ check_scalar(self.max_iter, \"max_iter\", int, min_val=1)\nif not isinstance(self.shuffle, bool):\nraise ValueError(f\"shuffle must be a bool, but {self.shuffle} is given\")\n- if not isinstance(self.tol, float) or self.tol <= 0.0:\n- raise ValueError(f\"tol must be a positive float, but {self.tol} is given\")\n+ check_scalar(self.tol, \"tol\", float)\n+ if self.tol <= 0.0:\n+ raise ValueError(f\"`tol`= {self.tol}, must be > 0.0\")\n- if not isinstance(self.momentum, float) or not 0.0 <= self.momentum <= 1.0:\n- raise ValueError(\n- f\"momentum must be a float in [0., 1.], but {self.momentum} is given\"\n- )\n+ check_scalar(self.momentum, \"momentum\", float, min_val=0.0, max_val=1.0)\nif not isinstance(self.nesterovs_momentum, bool):\nraise ValueError(\n@@ -257,43 +243,19 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nf\"if early_stopping is True, solver must be one of 'sgd' or 'adam', but {self.solver} is given\"\n)\n- if (\n- not isinstance(self.validation_fraction, 
float)\n- or not 0.0 < self.validation_fraction <= 1.0\n- ):\n- raise ValueError(\n- f\"validation_fraction must be a float in (0., 1.], but {self.validation_fraction} is given\"\n+ check_scalar(\n+ self.validation_fraction, \"validation_fraction\", float, max_val=1.0\n)\n-\n- if not isinstance(self.beta_1, float) or not 0.0 <= self.beta_1 <= 1.0:\n+ if self.validation_fraction <= 0.0:\nraise ValueError(\n- f\"beta_1 must be a float in [0. 1.], but {self.beta_1} is given\"\n+ f\"`validation_fraction`= {self.validation_fraction}, must be > 0.0\"\n)\n- if not isinstance(self.beta_2, float) or not 0.0 <= self.beta_2 <= 1.0:\n- raise ValueError(\n- f\"beta_2 must be a float in [0., 1.], but {self.beta_2} is given\"\n- )\n-\n- if not isinstance(self.beta_2, float) or not 0.0 <= self.beta_2 <= 1.0:\n- raise ValueError(\n- f\"beta_2 must be a float in [0., 1.], but {self.beta_2} is given\"\n- )\n-\n- if not isinstance(self.epsilon, float) or self.epsilon < 0.0:\n- raise ValueError(\n- f\"epsilon must be a non-negative float, but {self.epsilon} is given\"\n- )\n-\n- if not isinstance(self.n_iter_no_change, int) or self.n_iter_no_change <= 0:\n- raise ValueError(\n- f\"n_iter_no_change must be a positive integer, but {self.n_iter_no_change} is given\"\n- )\n-\n- if not isinstance(self.max_fun, int) or self.max_fun <= 0:\n- raise ValueError(\n- f\"max_fun must be a positive integer, but {self.max_fun} is given\"\n- )\n+ check_scalar(self.beta_1, \"beta_1\", float, min_val=0.0, max_val=1.0)\n+ check_scalar(self.beta_2, \"beta_2\", float, min_val=0.0, max_val=1.0)\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0)\n+ check_scalar(self.n_iter_no_change, \"n_iter_no_change\", int, min_val=1)\n+ check_scalar(self.max_fun, \"max_fun\", int, min_val=1)\nif self.q_func_estimator_hyperparams is not None:\nif not isinstance(self.q_func_estimator_hyperparams, dict):\n@@ -802,10 +764,7 @@ class QFuncEstimatorForContinuousAction:\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- if not isinstance(self.dim_context, int) or self.dim_context <= 0:\n- raise ValueError(\n- f\"dim_context must be a positive integer, but {self.dim_context} is given\"\n- )\n+ check_scalar(self.dim_context, \"dim_context\", int, min_val=1)\nif not isinstance(self.hidden_layer_size, tuple) or any(\n[not isinstance(h, int) or h <= 0 for h in self.hidden_layer_size]\n@@ -819,10 +778,7 @@ class QFuncEstimatorForContinuousAction:\nf\"solver must be one of 'adam', 'lbfgs', or 'sgd', but {self.solver} is given\"\n)\n- if not isinstance(self.alpha, float) or self.alpha < 0.0:\n- raise ValueError(\n- f\"alpha must be a non-negative float, but {self.alpha} is given\"\n- )\n+ check_scalar(self.alpha, \"alpha\", float, min_val=0.0)\nif self.batch_size != \"auto\" and (\nnot isinstance(self.batch_size, int) or self.batch_size <= 0\n@@ -831,29 +787,22 @@ class QFuncEstimatorForContinuousAction:\nf\"batch_size must be a positive integer or 'auto', but {self.batch_size} is given\"\n)\n- if (\n- not isinstance(self.learning_rate_init, float)\n- or self.learning_rate_init <= 0.0\n- ):\n+ check_scalar(self.learning_rate_init, \"learning_rate_init\", float)\n+ if self.learning_rate_init <= 0.0:\nraise ValueError(\n- f\"learning_rate_init must be a positive float, but {self.learning_rate_init} is given\"\n+ f\"`learning_rate_init`= {self.learning_rate_init}, must be > 0.0\"\n)\n- if not isinstance(self.max_iter, int) or self.max_iter <= 0:\n- raise ValueError(\n- f\"max_iter must be a positive integer, but {self.max_iter} is 
given\"\n- )\n+ check_scalar(self.max_iter, \"max_iter\", int, min_val=1)\nif not isinstance(self.shuffle, bool):\nraise ValueError(f\"shuffle must be a bool, but {self.shuffle} is given\")\n- if not isinstance(self.tol, float) or self.tol <= 0.0:\n- raise ValueError(f\"tol must be a positive float, but {self.tol} is given\")\n+ check_scalar(self.tol, \"tol\", float)\n+ if self.tol <= 0.0:\n+ raise ValueError(f\"`tol`= {self.tol}, must be > 0.0\")\n- if not isinstance(self.momentum, float) or not 0.0 <= self.momentum <= 1.0:\n- raise ValueError(\n- f\"momentum must be a float in [0., 1.], but {self.momentum} is given\"\n- )\n+ check_scalar(self.momentum, \"momentum\", float, min_val=0.0, max_val=1.0)\nif not isinstance(self.nesterovs_momentum, bool):\nraise ValueError(\n@@ -870,43 +819,19 @@ class QFuncEstimatorForContinuousAction:\nf\"if early_stopping is True, solver must be one of 'sgd' or 'adam', but {self.solver} is given\"\n)\n- if (\n- not isinstance(self.validation_fraction, float)\n- or not 0.0 < self.validation_fraction <= 1.0\n- ):\n- raise ValueError(\n- f\"validation_fraction must be a float in (0., 1.], but {self.validation_fraction} is given\"\n+ check_scalar(\n+ self.validation_fraction, \"validation_fraction\", float, max_val=1.0\n)\n-\n- if not isinstance(self.beta_1, float) or not 0.0 <= self.beta_1 <= 1.0:\n+ if self.validation_fraction <= 0.0:\nraise ValueError(\n- f\"beta_1 must be a float in [0. 1.], but {self.beta_1} is given\"\n+ f\"`validation_fraction`= {self.validation_fraction}, must be > 0.0\"\n)\n- if not isinstance(self.beta_2, float) or not 0.0 <= self.beta_2 <= 1.0:\n- raise ValueError(\n- f\"beta_2 must be a float in [0., 1.], but {self.beta_2} is given\"\n- )\n-\n- if not isinstance(self.beta_2, float) or not 0.0 <= self.beta_2 <= 1.0:\n- raise ValueError(\n- f\"beta_2 must be a float in [0., 1.], but {self.beta_2} is given\"\n- )\n-\n- if not isinstance(self.epsilon, float) or self.epsilon < 0.0:\n- raise ValueError(\n- f\"epsilon must be a non-negative float, but {self.epsilon} is given\"\n- )\n-\n- if not isinstance(self.n_iter_no_change, int) or self.n_iter_no_change <= 0:\n- raise ValueError(\n- f\"n_iter_no_change must be a positive integer, but {self.n_iter_no_change} is given\"\n- )\n-\n- if not isinstance(self.max_fun, int) or self.max_fun <= 0:\n- raise ValueError(\n- f\"max_fun must be a positive integer, but {self.max_fun} is given\"\n- )\n+ check_scalar(self.beta_1, \"beta_1\", float, min_val=0.0, max_val=1.0)\n+ check_scalar(self.beta_2, \"beta_2\", float, min_val=0.0, max_val=1.0)\n+ check_scalar(self.epsilon, \"epsilon\", float, min_val=0.0)\n+ check_scalar(self.n_iter_no_change, \"n_iter_no_change\", int, min_val=1)\n+ check_scalar(self.max_fun, \"max_fun\", int, min_val=1)\nif self.random_state is not None:\nself.random_ = check_random_state(self.random_state)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline_continuous.py",
"new_path": "tests/policy/test_offline_continuous.py",
"diff": "@@ -34,7 +34,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"dim_context must be a positive integer\",\n+ \"`dim_context`= 0, must be >= 1\",\n),\n(\n10,\n@@ -88,7 +88,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"bandwidth must be a positive float\",\n+ \"`bandwidth`= -0.1, must be > 0\",\n),\n(\n10,\n@@ -206,7 +206,7 @@ invalid_input_of_nn_policy_learner_init = [\n(100, 50, 100),\n\"relu\",\n\"adam\",\n- -1, #\n+ -1.0, #\n\"auto\",\n0.0001,\n200,\n@@ -223,7 +223,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"alpha must be a non-negative float\",\n+ \"`alpha`= -1.0, must be >= 0.0\",\n),\n(\n10,\n@@ -262,7 +262,7 @@ invalid_input_of_nn_policy_learner_init = [\n\"adam\",\n0.001,\n\"auto\",\n- 0, #\n+ 0.0, #\n200,\nTrue,\n123,\n@@ -277,7 +277,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"learning_rate_init must be a positive float\",\n+ \"`learning_rate_init`= 0.0, must be > 0.0\",\n),\n(\n10,\n@@ -304,7 +304,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"max_iter must be a positive integer\",\n+ \"`max_iter`= 0, must be >= 1\",\n),\n(\n10,\n@@ -374,7 +374,7 @@ invalid_input_of_nn_policy_learner_init = [\n200,\nTrue,\n123,\n- -1, #\n+ -1.0, #\n0.9,\nTrue,\nTrue,\n@@ -385,7 +385,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"tol must be a positive float\",\n+ \"`tol`= -1.0, must be > 0.0\",\n),\n(\n10,\n@@ -402,7 +402,7 @@ invalid_input_of_nn_policy_learner_init = [\nTrue,\n123,\n1e-4,\n- 2, #\n+ 2.0, #\nTrue,\nTrue,\n0.1,\n@@ -412,7 +412,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n15000,\nNone,\n- \"momentum must be a float in [0., 1.]\",\n+ \"`momentum`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -513,14 +513,14 @@ invalid_input_of_nn_policy_learner_init = [\n0.9,\nTrue,\nTrue,\n- 2, #\n+ 2.0, #\n0.9,\n0.999,\n1e-8,\n10,\n15000,\nNone,\n- \"validation_fraction must be a float in\",\n+ \"`validation_fraction`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -541,13 +541,13 @@ invalid_input_of_nn_policy_learner_init = [\nTrue,\nTrue,\n0.1,\n- 2, #\n+ 2.0, #\n0.999,\n1e-8,\n10,\n15000,\nNone,\n- \"beta_1 must be a float in [0. 1.]\",\n+ \"`beta_1`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -569,12 +569,12 @@ invalid_input_of_nn_policy_learner_init = [\nTrue,\n0.1,\n0.9,\n- 2, #\n+ 2.0, #\n1e-8,\n10,\n15000,\nNone,\n- \"beta_2 must be a float in [0., 1.]\",\n+ \"`beta_2`= 2.0, must be <= 1.0\",\n),\n(\n10,\n@@ -597,11 +597,11 @@ invalid_input_of_nn_policy_learner_init = [\n0.1,\n0.9,\n0.999,\n- -1, #\n+ -1.0, #\n10,\n15000,\nNone,\n- \"epsilon must be a non-negative float\",\n+ \"`epsilon`= -1.0, must be >= 0.0\",\n),\n(\n10,\n@@ -628,7 +628,7 @@ invalid_input_of_nn_policy_learner_init = [\n0, #\n15000,\nNone,\n- \"n_iter_no_change must be a positive integer\",\n+ \"`n_iter_no_change`= 0, must be >= 1\",\n),\n(\n10,\n@@ -655,7 +655,7 @@ invalid_input_of_nn_policy_learner_init = [\n10,\n0, #\nNone,\n- \"max_fun must be a positive integer\",\n+ \"`max_fun`= 0, must be >= 1\",\n),\n(\n10,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix offline-continuous check-scalar |
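A pattern that repeats across these validation commits (tol, learning_rate_init, validation_fraction, bandwidth) is visible in the continuous-learner diff above: `check_scalar` accepts a tuple of types such as `(int, float)`, but its `min_val` is an inclusive bound, so strictly-positive hyperparameters still get a manual `<= 0` check right after it. A short sketch of that two-step pattern, using an arbitrary value:

```python
from sklearn.utils import check_scalar

bandwidth = 0.5

# a tuple of accepted types lets both ints and floats through the type check
check_scalar(bandwidth, "bandwidth", (int, float))

# min_val would allow 0 itself (it is a >= bound), so strict positivity
# is enforced with an explicit follow-up check, mirroring the diff above
if bandwidth <= 0:
    raise ValueError(f"`bandwidth`= {bandwidth}, must be > 0.")
```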
641,006 | 06.09.2021 00:46:43 | -32,400 | 0e28a8c2ab696f0712d1725bbc583644556c4771 | add self-normalized slate estimators | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -15,6 +15,8 @@ from obp.ope.estimators_slate import SlateStandardIPS\nfrom obp.ope.estimators_slate import SlateIndependentIPS\nfrom obp.ope.estimators_slate import SlateRewardInteractionIPS\nfrom obp.ope.estimators_slate import SelfNormalizedSlateRewardInteractionIPS\n+from obp.ope.estimators_slate import SelfNormalizedSlateIndependentIPS\n+from obp.ope.estimators_slate import SelfNormalizedSlateStandardIPS\nfrom obp.ope.estimators_continuous import BaseContinuousOffPolicyEstimator\nfrom obp.ope.estimators_continuous import KernelizedInverseProbabilityWeighting\nfrom obp.ope.estimators_continuous import (\n@@ -52,6 +54,8 @@ __all__ = [\n\"SlateIndependentIPS\",\n\"SlateRewardInteractionIPS\",\n\"SelfNormalizedSlateRewardInteractionIPS\",\n+ \"SelfNormalizedSlateIndependentIPS\",\n+ \"SelfNormalizedSlateStandardIPS\",\n\"BaseContinuousOffPolicyEstimator\",\n\"KernelizedInverseProbabilityWeighting\",\n\"KernelizedSelfNormalizedInverseProbabilityWeighting\",\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -128,6 +128,44 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nrandom_state=random_state,\n)\n+ def _estimate_round_rewards_self_normalized(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ behavior_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Self-Normalized estimated rewards given round (slate_id) and slot (position).\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed at each slot in each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n+\n+ behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Self-Normalized rewards estimated by IPW given round (slate_id) and slot (position).\n+\n+ \"\"\"\n+ estimated_rewards = np.zeros_like(behavior_policy_pscore)\n+ iw = np.zeros_like(behavior_policy_pscore)\n+ for position_ in range(self.len_list):\n+ idx = position == position_\n+ iw[idx] = evaluation_policy_pscore[idx] / behavior_policy_pscore[idx]\n+ estimated_rewards[idx] = reward[idx] * iw[idx] / iw[idx].mean()\n+ return estimated_rewards\n+\n@dataclass\nclass SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n@@ -567,34 +605,173 @@ class SlateRewardInteractionIPS(BaseSlateInverseProbabilityWeighting):\n@dataclass\n-class SelfNormalizedSlateRewardInteractionIPS(SlateRewardInteractionIPS):\n- \"\"\"Self-Normalized Inverse Probability Weighting (SNIPW) Estimator.\n+class SelfNormalizedSlateStandardIPS(SlateStandardIPS):\n+ \"\"\"Self-Normalized Standard Interaction Inverse Propensity Scoring (SNSIPS) Estimator.\n+\n+ Note\n+ -------\n+ Self-Normalized Standard Interaction Inverse Propensity Scoring (SNSIPS) is our original estimator based on the SlateStandardIPS.\n+\n+ SNSIPS calculates the slot-level empirical average of importance weights\n+ and re-weights the observed rewards of slot :math:`k` by the averaged weight of the slot.\n+\n+ A Self-Normalized estimator is not unbiased even when the behavior policy is known.\n+ However, it is still consistent for the true policy value and increases the stability in some senses.\n+ See the references for the detailed discussions.\n+\n+ Parameters\n+ ----------\n+ estimator_name: str, default='snsips'.\n+ Name of the estimator.\n+\n+ References\n+ ----------\n+ James McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Ben Carterette.\n+ \"Counterfactual Evaluation of Slate Recommendations with Sequential Reward Interactions\", 2020.\n+\n+ Adith Swaminathan and Thorsten Joachims.\n+ \"The Self-normalized Estimator for Counterfactual Learning.\", 2015.\n+\n+ Nathan Kallus and Masatoshi Uehara.\n+ \"Intrinsically Efficient, Stable, and Bounded Off-Policy Evaluation for Reinforcement Learning.\", 2019.\n+\n+ \"\"\"\n+\n+ estimator_name: str = \"snsips\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ behavior_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ 
**kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate rewards given round (slate_id) and slot (position).\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed at each slot in each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n+\n+ behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Rewards estimated by the SNSIPS estimator given round (slate_id) and slot (position).\n+\n+ \"\"\"\n+ return self._estimate_round_rewards_self_normalized(\n+ reward, position, behavior_policy_pscore, evaluation_policy_pscore\n+ )\n+\n+\n+@dataclass\n+class SelfNormalizedSlateIndependentIPS(SlateIndependentIPS):\n+ \"\"\"Self-Normalized Independent Inverse Propensity Scoring (SNIIPS) Estimator.\nNote\n-------\n- Self-Normalized Inverse Probability Weighting (SNIPW) estimates the policy value of evaluation policy :math:`\\\\pi_e` by\n+ Self-Normalized Independent Inverse Propensity Scoring (SNIIPS) is our original estimator based on the SlateIndependentIPS.\n+\n+ SNIIPS calculates the slot-level empirical average of importance weights\n+ and re-weights the observed rewards of slot :math:`k` by the averaged weight of the slot.\n+\n+ A Self-Normalized estimator is not unbiased even when the behavior policy is known.\n+ However, it is still consistent for the true policy value and increases the stability in some senses.\n+ See the references for the detailed discussions.\n+\n+ Parameters\n+ ----------\n+ estimator_name: str, default='sniips'.\n+ Name of the estimator.\n+\n+ References\n+ ----------\n+ Shuai Li, Yasin Abbasi-Yadkori, Branislav Kveton, S. 
Muthukrishnan, Vishwa Vinay, Zheng Wen.\n+ \"Offline Evaluation of Ranking Policies with Click Models\", 2018.\n+\n+ James McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Ben Carterette.\n+ \"Counterfactual Evaluation of Slate Recommendations with Sequential Reward Interactions\", 2020.\n+\n+ Adith Swaminathan and Thorsten Joachims.\n+ \"The Self-normalized Estimator for Counterfactual Learning.\", 2015.\n+\n+ Nathan Kallus and Masatoshi Uehara.\n+ \"Intrinsically Efficient, Stable, and Bounded Off-Policy Evaluation for Reinforcement Learning.\", 2019.\n+\n+ \"\"\"\n+\n+ estimator_name: str = \"sniips\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ behavior_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate rewards given round (slate_id) and slot (position).\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed at each slot in each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n+\n+ behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Rewards estimated by the SNIIPS estimator given round (slate_id) and slot (position).\n- .. math::\n+ \"\"\"\n+ return self._estimate_round_rewards_self_normalized(\n+ reward, position, behavior_policy_pscore, evaluation_policy_pscore\n+ )\n- \\\\hat{V}_{\\\\mathrm{SNIPW}} (\\\\pi_e; \\\\mathcal{D}) :=\n- \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} [w(x_t,a_t) r_t]}{ \\\\mathbb{E}_{\\\\mathcal{D}} [w(x_t,a_t)]},\n- where :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by\n- a behavior policy :math:`\\\\pi_b`. 
:math:`w(x,a):=\\\\pi_e (a|x)/\\\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.\n- :math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n+@dataclass\n+class SelfNormalizedSlateRewardInteractionIPS(SlateRewardInteractionIPS):\n+ \"\"\"Self-Normalized Reward Interaction Inverse Propensity Scoring (SNRIPS) Estimator.\n- SNIPW re-weights the observed rewards by the self-normalized importance weihgt.\n- This estimator is not unbiased even when the behavior policy is known.\n+ Note\n+ -------\n+ Self-Normalized Reward Interaction Inverse Propensity Scoring (SNRIPS) is our original estimator based on the SlateRewardInteractionIPS.\n+\n+ SNRIPS calculates the slot-level empirical average of importance weights\n+ and re-weights the observed rewards of slot :math:`k` by the averaged weight of the slot.\n+\n+ A Self-Normalized estimator is not unbiased even when the behavior policy is known.\nHowever, it is still consistent for the true policy value and increases the stability in some senses.\nSee the references for the detailed discussions.\nParameters\n----------\n- estimator_name: str, default='snipw'.\n+ estimator_name: str, default='snrips'.\nName of the estimator.\nReferences\n----------\n+ James McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Ben Carterette.\n+ \"Counterfactual Evaluation of Slate Recommendations with Sequential Reward Interactions\", 2020.\n+\nAdith Swaminathan and Thorsten Joachims.\n\"The Self-normalized Estimator for Counterfactual Learning.\", 2015.\n@@ -632,13 +809,9 @@ class SelfNormalizedSlateRewardInteractionIPS(SlateRewardInteractionIPS):\nReturns\n----------\nestimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by IPW given round (slate_id) and slot (position).\n+ Rewards estimated by the SNRIPS estimator given round (slate_id) and slot (position).\n\"\"\"\n- estimated_rewards = np.zeros_like(behavior_policy_pscore)\n- iw = np.zeros_like(behavior_policy_pscore)\n- for position_ in range(self.len_list):\n- idx = position == position_\n- iw[idx] = evaluation_policy_pscore[idx] / behavior_policy_pscore[idx]\n- estimated_rewards[idx] = reward[idx] * iw[idx] / iw[idx].mean()\n- return estimated_rewards\n+ return self._estimate_round_rewards_self_normalized(\n+ reward, position, behavior_policy_pscore, evaluation_policy_pscore\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add self-normalized slate estimators |
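The self-normalized slate estimators added in this commit re-weight each slot's observed reward by its importance weight divided by the empirical mean of the weights for that slot. Below is a minimal, self-contained sketch of that per-slot computation on synthetic arrays; the array names, sizes, and propensities are illustrative assumptions, not obp API calls.

import numpy as np

rng = np.random.default_rng(0)
n_rounds, len_list = 1000, 3

# synthetic slot-level logs: rewards, slot ids, and behavior/evaluation propensities
reward = rng.binomial(1, 0.3, size=n_rounds * len_list).astype(float)
position = np.tile(np.arange(len_list), n_rounds)
behavior_policy_pscore = rng.uniform(0.1, 1.0, size=n_rounds * len_list)
evaluation_policy_pscore = rng.uniform(0.1, 1.0, size=n_rounds * len_list)

estimated_rewards = np.zeros_like(behavior_policy_pscore)
for position_ in range(len_list):
    idx = position == position_
    # vanilla importance weight for this slot
    iw = evaluation_policy_pscore[idx] / behavior_policy_pscore[idx]
    # self-normalization: divide by the empirical mean of the slot's weights
    estimated_rewards[idx] = reward[idx] * iw / iw.mean()

# summing slot-level averages gives a slate-level value estimate,
# which roughly mirrors what the slate estimators' estimate_policy_value computes
print(estimated_rewards.sum() / n_rounds)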
641,014 | 06.09.2021 20:21:14 | 14,400 | 16c7d1c20f9ac30bf6327ecd967d7e46c36c48f9 | update obp version in benchmark | [
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/README.md",
"new_path": "benchmark/ope/README.md",
"diff": "@@ -53,8 +53,8 @@ numpy = \"^1.21.1\"\nmatplotlib = \"^3.4.2\"\nhydra-core = \"^1.1.0\"\npingouin = \"^0.4.0\"\n-obp = \"^0.4.2\"\npyieoe = \"^0.1.0\"\n+obp = \"^0.5.0\"\n```\n## Files\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/poetry.lock",
"new_path": "benchmark/ope/poetry.lock",
"diff": "@@ -184,23 +184,25 @@ python-versions = \">=3.7,<3.11\"\n[[package]]\nname = \"obp\"\n-version = \"0.4.1\"\n+version = \"0.5.0\"\ndescription = \"Open Bandit Pipeline: a python library for bandit algorithms and off-policy evaluation\"\ncategory = \"main\"\noptional = false\n-python-versions = \"*\"\n+python-versions = \">=3.7.1,<3.10\"\n[package.dependencies]\n-matplotlib = \">=3.2.2\"\n-mypy-extensions = \">=0.4.3\"\n-numpy = \">=1.18.1\"\n-pandas = \">=0.25.1\"\n-pyyaml = \">=5.1\"\n-scikit-learn = \">=0.23.1\"\n-scipy = \">=1.4.1\"\n-seaborn = \">=0.10.1\"\n-torch = \">=1.7.1\"\n-tqdm = \">=4.41.1\"\n+matplotlib = \">=3.4.3,<4.0.0\"\n+mypy-extensions = \">=0.4.3,<0.5.0\"\n+numpy = \">=1.21.2,<2.0.0\"\n+pandas = \">=1.3.2,<2.0.0\"\n+pingouin = \">=0.4.0,<0.5.0\"\n+pyieoe = \">=0.1.1,<0.2.0\"\n+PyYAML = \">=5.4.1,<6.0.0\"\n+scikit-learn = \">=0.24.2,<0.25.0\"\n+scipy = \">=1.7.1,<2.0.0\"\n+seaborn = \">=0.11.2,<0.12.0\"\n+torch = \">=1.9.0,<2.0.0\"\n+tqdm = \">=4.62.2,<5.0.0\"\n[[package]]\nname = \"omegaconf\"\n@@ -320,7 +322,7 @@ python-versions = \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\"\n[[package]]\nname = \"pyieoe\"\n-version = \"0.1.0\"\n+version = \"0.1.1\"\ndescription = \"pyIEOE: a Python package to facilitate interpretable OPE evaluation\"\ncategory = \"main\"\noptional = false\n@@ -509,7 +511,7 @@ typing-extensions = \"*\"\n[[package]]\nname = \"tqdm\"\n-version = \"4.62.1\"\n+version = \"4.62.2\"\ndescription = \"Fast, Extensible Progress Meter\"\ncategory = \"main\"\noptional = false\n@@ -567,7 +569,7 @@ viz = [\"matplotlib\", \"seaborn\", \"nc-time-axis\"]\n[metadata]\nlock-version = \"1.1\"\npython-versions = \"^3.9,<3.10\"\n-content-hash = \"4d5877b71e7c289230edf83a4a4fba035f9ede84ef017df5bed560154d2c13c6\"\n+content-hash = \"f61d01af9f6cb2ab9ec46eeb39d3d02f096cc4964ea7532cff3f5bf032e118a2\"\n[metadata.files]\nantlr4-python3-runtime = [\n@@ -718,9 +720,7 @@ numpy = [\n{file = \"numpy-1.21.2.zip\", hash = \"sha256:423216d8afc5923b15df86037c6053bf030d15cc9e3224206ef868c2d63dd6dc\"},\n]\nobp = [\n- {file = \"obp-0.4.1-py3-none-any.whl\", hash = \"sha256:87dec9caf4283c25ab13036532e0a24566f1c7bdcad1653a8ded43b2f5ef8208\"},\n- {file = \"obp-0.4.1-py3.8.egg\", hash = \"sha256:ec415eda29f718d9e93e911b00b8fcb8d0d8caf72b5c47992b424f525da7ceca\"},\n- {file = \"obp-0.4.1.tar.gz\", hash = \"sha256:4be96fae9ba4f7a0cff3e313209e767c466f1b940bbc203c4238cc23f1241d15\"},\n+ {file = \"obp-0.5.0.tar.gz\", hash = \"sha256:a8bbcb9aeb6e17da754690abac7e64e28f4c09214de3ce64643966aeaf1b947b\"},\n]\nomegaconf = [\n{file = \"omegaconf-2.1.1-py3-none-any.whl\", hash = \"sha256:be93d73eaa2564fbe52d88ee13e3b79f4c6e04876b2f326551a21391f7dc6367\"},\n@@ -816,7 +816,7 @@ pyflakes = [\n{file = \"pyflakes-2.3.1.tar.gz\", hash = \"sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db\"},\n]\npyieoe = [\n- {file = \"pyieoe-0.1.0.tar.gz\", hash = \"sha256:a9f2f4b88c80b6dd9bfada8d9b79b250e33ceabb0fe713c385958a65962a5559\"},\n+ {file = \"pyieoe-0.1.1.tar.gz\", hash = \"sha256:679bee6e437cbba7bae9462a5d52d7d1a71bc7568aa4f80eec0add1dc3126b78\"},\n]\npyparsing = [\n{file = \"pyparsing-2.4.7-py2.py3-none-any.whl\", hash = \"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b\"},\n@@ -1020,8 +1020,8 @@ torch = [\n{file = \"torch-1.9.0-cp39-none-macosx_11_0_arm64.whl\", hash = \"sha256:569ead6ae6bb0e636df0fc8af660ef03260e630dc5f2f4cf3198027e7b6bb481\"},\n]\ntqdm = [\n- {file = \"tqdm-4.62.1-py2.py3-none-any.whl\", hash = 
\"sha256:07856e19a1fe4d2d9621b539d3f072fa88c9c1ef1f3b7dd4d4953383134c3164\"},\n- {file = \"tqdm-4.62.1.tar.gz\", hash = \"sha256:35540feeaca9ac40c304e916729e6b78045cbbeccd3e941b2868f09306798ac9\"},\n+ {file = \"tqdm-4.62.2-py2.py3-none-any.whl\", hash = \"sha256:80aead664e6c1672c4ae20dc50e1cdc5e20eeff9b14aa23ecd426375b28be588\"},\n+ {file = \"tqdm-4.62.2.tar.gz\", hash = \"sha256:a4d6d112e507ef98513ac119ead1159d286deab17dffedd96921412c2d236ff5\"},\n]\ntyping-extensions = [\n{file = \"typing_extensions-3.10.0.0-py2-none-any.whl\", hash = \"sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497\"},\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/ope/pyproject.toml",
"new_path": "benchmark/ope/pyproject.toml",
"diff": "[tool.poetry]\nname = \"benchmark/ope\"\nversion = \"0.1.0\"\n-description = \"\"\n+description = \"benchmarking OPE estimators on Open Bandit Dataset\"\nauthors = [\"usaito <[email protected]>\"]\n[tool.poetry.dependencies]\n@@ -12,8 +12,8 @@ numpy = \"^1.21.1\"\nmatplotlib = \"^3.4.2\"\nhydra-core = \"^1.1.0\"\npingouin = \"^0.4.0\"\n-obp = \"^0.4.2\"\npyieoe = \"^0.1.0\"\n+obp = \"^0.5.0\"\n[tool.poetry.dev-dependencies]\nflake8 = \"^3.9.2\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | update obp version in benchmark |
641,006 | 20.09.2021 17:08:24 | -32,400 | 5deff62cf41ba553b25060dcebdd461734a04675 | fix self normalized estimator class | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_slate.py",
"new_path": "obp/ope/estimators_slate.py",
"diff": "@@ -128,44 +128,6 @@ class BaseSlateInverseProbabilityWeighting(BaseSlateOffPolicyEstimator):\nrandom_state=random_state,\n)\n- def _estimate_round_rewards_self_normalized(\n- self,\n- reward: np.ndarray,\n- position: np.ndarray,\n- behavior_policy_pscore: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n- **kwargs,\n- ) -> np.ndarray:\n- \"\"\"Self-Normalized estimated rewards given round (slate_id) and slot (position).\n-\n- Parameters\n- ----------\n- reward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed at each slot in each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n-\n- position: array-like, shape (<= n_rounds * len_list,)\n- IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n-\n- behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n-\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n-\n- Returns\n- ----------\n- estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Self-Normalized rewards estimated by IPW given round (slate_id) and slot (position).\n-\n- \"\"\"\n- estimated_rewards = np.zeros_like(behavior_policy_pscore)\n- iw = np.zeros_like(behavior_policy_pscore)\n- for position_ in range(self.len_list):\n- idx = position == position_\n- iw[idx] = evaluation_policy_pscore[idx] / behavior_policy_pscore[idx]\n- estimated_rewards[idx] = reward[idx] * iw[idx] / iw[idx].mean()\n- return estimated_rewards\n-\n@dataclass\nclass SlateStandardIPS(BaseSlateInverseProbabilityWeighting):\n@@ -605,7 +567,62 @@ class SlateRewardInteractionIPS(BaseSlateInverseProbabilityWeighting):\n@dataclass\n-class SelfNormalizedSlateStandardIPS(SlateStandardIPS):\n+class BaseSlateSelfNormalizedInverseProbabilityWeighting(\n+ BaseSlateInverseProbabilityWeighting\n+):\n+ \"\"\"Base Class of Self-Normalized Inverse Probability Weighting Estimators for the slate contextual bandit setting.\n+\n+ len_list: int (> 1)\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, `len_list=3`.\n+\n+ \"\"\"\n+\n+ len_list: int\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ behavior_policy_pscore: np.ndarray,\n+ evaluation_policy_pscore: np.ndarray,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Self-Normalized estimated rewards given round (slate_id) and slot (position).\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (<= n_rounds * len_list,)\n+ Reward observed at each slot in each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n+\n+ position: array-like, shape (<= n_rounds * len_list,)\n+ IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n+\n+ behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n+ Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n+ Self-Normalized rewards estimated by IPW given round (slate_id) and slot (position).\n+\n+ \"\"\"\n+ estimated_rewards = np.zeros_like(behavior_policy_pscore)\n+ iw = 
np.zeros_like(behavior_policy_pscore)\n+ for position_ in range(self.len_list):\n+ idx = position == position_\n+ iw[idx] = evaluation_policy_pscore[idx] / behavior_policy_pscore[idx]\n+ estimated_rewards[idx] = reward[idx] * iw[idx] / iw[idx].mean()\n+ return estimated_rewards\n+\n+\n+@dataclass\n+class SelfNormalizedSlateStandardIPS(\n+ SlateStandardIPS, BaseSlateSelfNormalizedInverseProbabilityWeighting\n+):\n\"\"\"Self-Normalized Standard Interaction Inverse Propensity Scoring (SNSIPS) Estimator.\nNote\n@@ -669,13 +686,16 @@ class SelfNormalizedSlateStandardIPS(SlateStandardIPS):\nRewards estimated by the SNSIPS estimator given round (slate_id) and slot (position).\n\"\"\"\n- return self._estimate_round_rewards_self_normalized(\n- reward, position, behavior_policy_pscore, evaluation_policy_pscore\n- )\n+ estimated_rewards = np.zeros_like(behavior_policy_pscore)\n+ iw = evaluation_policy_pscore / behavior_policy_pscore\n+ estimated_rewards = reward * iw / iw.mean()\n+ return estimated_rewards\n@dataclass\n-class SelfNormalizedSlateIndependentIPS(SlateIndependentIPS):\n+class SelfNormalizedSlateIndependentIPS(\n+ SlateIndependentIPS, BaseSlateSelfNormalizedInverseProbabilityWeighting\n+):\n\"\"\"Self-Normalized Independent Inverse Propensity Scoring (SNIIPS) Estimator.\nNote\n@@ -712,43 +732,11 @@ class SelfNormalizedSlateIndependentIPS(SlateIndependentIPS):\nestimator_name: str = \"sniips\"\n- def _estimate_round_rewards(\n- self,\n- reward: np.ndarray,\n- position: np.ndarray,\n- behavior_policy_pscore: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n- **kwargs,\n- ) -> np.ndarray:\n- \"\"\"Estimate rewards given round (slate_id) and slot (position).\n-\n- Parameters\n- ----------\n- reward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed at each slot in each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n-\n- position: array-like, shape (<= n_rounds * len_list,)\n- IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n-\n- behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n-\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n-\n- Returns\n- ----------\n- estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by the SNIIPS estimator given round (slate_id) and slot (position).\n-\n- \"\"\"\n- return self._estimate_round_rewards_self_normalized(\n- reward, position, behavior_policy_pscore, evaluation_policy_pscore\n- )\n-\n@dataclass\n-class SelfNormalizedSlateRewardInteractionIPS(SlateRewardInteractionIPS):\n+class SelfNormalizedSlateRewardInteractionIPS(\n+ SlateRewardInteractionIPS, BaseSlateSelfNormalizedInverseProbabilityWeighting\n+):\n\"\"\"Self-Normalized Reward Interaction Inverse Propensity Scoring (SNRIPS) Estimator.\nNote\n@@ -781,37 +769,3 @@ class SelfNormalizedSlateRewardInteractionIPS(SlateRewardInteractionIPS):\n\"\"\"\nestimator_name: str = \"snrips\"\n-\n- def _estimate_round_rewards(\n- self,\n- reward: np.ndarray,\n- position: np.ndarray,\n- behavior_policy_pscore: np.ndarray,\n- evaluation_policy_pscore: np.ndarray,\n- **kwargs,\n- ) -> np.ndarray:\n- \"\"\"Estimate rewards given round (slate_id) and slot (position).\n-\n- Parameters\n- ----------\n- reward: array-like, shape (<= n_rounds * len_list,)\n- Reward observed at each slot in 
each round of the logged bandit feedback, i.e., :math:`r_{t}(k)`.\n-\n- position: array-like, shape (<= n_rounds * len_list,)\n- IDs to differentiate slot (i.e., position in recommendation/ranking interface) in each slate.\n-\n- behavior_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n-\n- evaluation_policy_pscore: array-like, shape (<= n_rounds * len_list,)\n- Action choice probabilities of evaluation policy, i.e., :math:`\\\\pi_e(a_t|x_t)`.\n-\n- Returns\n- ----------\n- estimated_rewards: array-like, shape (<= n_rounds * len_list,)\n- Rewards estimated by the SNRIPS estimator given round (slate_id) and slot (position).\n-\n- \"\"\"\n- return self._estimate_round_rewards_self_normalized(\n- reward, position, behavior_policy_pscore, evaluation_policy_pscore\n- )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix self normalized estimator class |
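This refactor moves the per-slot self-normalization into a shared base class, so SNIIPS and SNRIPS only need to inherit from both their vanilla counterpart and the new base; Python's method resolution order then picks up the self-normalized `_estimate_round_rewards` without any method body in the subclass. A simplified sketch of that inheritance pattern follows; the class names are shortened stand-ins, not the actual obp classes.

from dataclasses import dataclass

import numpy as np


@dataclass
class BaseSlateIPS:
    len_list: int

    def _estimate_round_rewards(self, reward, position, pscore_b, pscore_e):
        # vanilla importance weighting shared by the standard estimators
        return reward * pscore_e / pscore_b


@dataclass
class BaseSlateSelfNormalizedIPS(BaseSlateIPS):
    def _estimate_round_rewards(self, reward, position, pscore_b, pscore_e):
        # self-normalized variant: per-slot weights divided by their mean
        out = np.zeros_like(pscore_b)
        for pos in range(self.len_list):
            idx = position == pos
            iw = pscore_e[idx] / pscore_b[idx]
            out[idx] = reward[idx] * iw / iw.mean()
        return out


@dataclass
class IndependentIPS(BaseSlateIPS):
    pass  # differs only in which pscore it feeds to the shared method


@dataclass
class SelfNormalizedIndependentIPS(IndependentIPS, BaseSlateSelfNormalizedIPS):
    pass  # no body needed: the MRO resolves to the self-normalized method


print([c.__name__ for c in SelfNormalizedIndependentIPS.__mro__])
# ['SelfNormalizedIndependentIPS', 'IndependentIPS',
#  'BaseSlateSelfNormalizedIPS', 'BaseSlateIPS', 'object']

est = SelfNormalizedIndependentIPS(len_list=2)
reward = np.array([1.0, 0.0, 1.0, 1.0])
position = np.array([0, 1, 0, 1])
print(est._estimate_round_rewards(reward, position, np.full(4, 0.5), np.full(4, 0.25)))
# -> [1. 0. 1. 1.]: the normalization cancels when all weights in a slot are equal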
641,006 | 20.09.2021 17:08:43 | -32,400 | 7957c278beddd7b1224c70a0283a617d1f0e3696 | add self normalized slate ope tests | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators_slate.py",
"new_path": "tests/ope/test_ipw_estimators_slate.py",
"diff": "import pytest\nimport numpy as np\n-from obp.ope import SlateStandardIPS, SlateIndependentIPS, SlateRewardInteractionIPS\n+from obp.ope import (\n+ SlateStandardIPS,\n+ SlateIndependentIPS,\n+ SlateRewardInteractionIPS,\n+ SelfNormalizedSlateStandardIPS,\n+ SelfNormalizedSlateIndependentIPS,\n+ SelfNormalizedSlateRewardInteractionIPS,\n+)\nfrom obp.dataset import (\nlogistic_reward_function,\nlinear_behavior_policy_logit,\n@@ -13,6 +20,9 @@ len_list = 3\nsips = SlateStandardIPS(len_list=len_list)\niips = SlateIndependentIPS(len_list=len_list)\nrips = SlateRewardInteractionIPS(len_list=len_list)\n+snsips = SelfNormalizedSlateStandardIPS(len_list=len_list)\n+sniips = SelfNormalizedSlateIndependentIPS(len_list=len_list)\n+snrips = SelfNormalizedSlateRewardInteractionIPS(len_list=len_list)\nn_rounds = 5\n@@ -145,6 +155,49 @@ def test_slate_estimators_using_invalid_input_data(\nposition=position,\nevaluation_policy_pscore_cascade=evaluation_policy_pscore,\n)\n+ # self normalized\n+ _ = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = snsips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n+ )\n+ _ = sniips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n+ )\n+ _ = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n+ )\n+ _ = snrips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n+ )\n# --- valid (all slate estimators) ---\n@@ -218,6 +271,49 @@ def test_slate_estimators_using_valid_input_data(\nposition=position,\nevaluation_policy_pscore_cascade=evaluation_policy_pscore,\n)\n+ # self normalized\n+ _ = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = snsips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n+ )\n+ _ = sniips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n+ )\n+ _ = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n+ )\n+ _ = snrips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n+ )\n# --- invalid (sips) ---\n@@ -335,6 +431,21 @@ def 
test_sips_using_invalid_input_data(\nposition=position,\nevaluation_policy_pscore=evaluation_policy_pscore,\n)\n+ # self normalized\n+ _ = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n+ _ = snsips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ )\n# --- invalid (iips) ---\n@@ -441,6 +552,21 @@ def test_iips_using_invalid_input_data(\nposition=position,\nevaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n)\n+ # self normalized\n+ _ = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n+ )\n+ _ = sniips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,\n+ )\n# --- invalid (rips) ---\n@@ -563,6 +689,21 @@ def test_rips_using_invalid_input_data(\nposition=position,\nevaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n)\n+ # self normalized\n+ _ = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n+ )\n+ _ = snrips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,\n+ )\n# --- confidence intervals ---\n@@ -633,6 +774,37 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n+ # self normalized\n+ _ = snsips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = sniips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = snrips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\[email protected](\n@@ -685,6 +857,37 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n+ # self normalized\n+ _ = snsips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\n+ _ = sniips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore,\n+ position=position,\n+ evaluation_policy_pscore_item_position=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ 
random_state=random_state,\n+ )\n+ _ = snrips.estimate_interval(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore,\n+ position=position,\n+ evaluation_policy_pscore_cascade=evaluation_policy_pscore,\n+ alpha=alpha,\n+ n_bootstrap_samples=n_bootstrap_samples,\n+ random_state=random_state,\n+ )\ndef test_slate_ope_performance_using_cascade_additive_log():\n@@ -758,6 +961,30 @@ def test_slate_ope_performance_using_cascade_additive_log():\nposition=position,\nevaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n)\n+ # self normalized\n+ snsips_estimated_policy_value = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ sniips_estimated_policy_value = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n+ )\n+ snrips_estimated_policy_value = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n# compute statistics of ground truth policy value\nq_pi_e = (\nrandom_behavior_feedback[\"reward\"]\n@@ -774,6 +1001,9 @@ def test_slate_ope_performance_using_cascade_additive_log():\n\"sips\": sips_estimated_policy_value,\n\"iips\": iips_estimated_policy_value,\n\"rips\": rips_estimated_policy_value,\n+ \"snsips\": snsips_estimated_policy_value,\n+ \"sniips\": sniips_estimated_policy_value,\n+ \"snrips\": snrips_estimated_policy_value,\n}\nfor key in estimated_policy_value:\nprint(\n@@ -856,6 +1086,30 @@ def test_slate_ope_performance_using_independent_log():\nposition=position,\nevaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n)\n+ # self normalized\n+ snsips_estimated_policy_value = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ sniips_estimated_policy_value = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n+ )\n+ snrips_estimated_policy_value = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n# compute statistics of ground truth policy value\nq_pi_e = (\nrandom_behavior_feedback[\"reward\"]\n@@ -872,6 +1126,9 @@ def test_slate_ope_performance_using_independent_log():\n\"sips\": sips_estimated_policy_value,\n\"iips\": iips_estimated_policy_value,\n\"rips\": rips_estimated_policy_value,\n+ \"snsips\": snsips_estimated_policy_value,\n+ \"sniips\": sniips_estimated_policy_value,\n+ \"snrips\": snrips_estimated_policy_value,\n}\nfor key in estimated_policy_value:\nprint(\n@@ -954,6 +1211,30 @@ def test_slate_ope_performance_using_standard_additive_log():\nposition=position,\nevaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n)\n+ # self normalized\n+ snsips_estimated_policy_value = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ 
reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ sniips_estimated_policy_value = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n+ )\n+ snrips_estimated_policy_value = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n# compute statistics of ground truth policy value\nq_pi_e = (\nrandom_behavior_feedback[\"reward\"]\n@@ -970,6 +1251,9 @@ def test_slate_ope_performance_using_standard_additive_log():\n\"sips\": sips_estimated_policy_value,\n\"iips\": iips_estimated_policy_value,\n\"rips\": rips_estimated_policy_value,\n+ \"snsips\": snsips_estimated_policy_value,\n+ \"sniips\": sniips_estimated_policy_value,\n+ \"snrips\": snrips_estimated_policy_value,\n}\nfor key in estimated_policy_value:\nprint(\n@@ -979,3 +1263,91 @@ def test_slate_ope_performance_using_standard_additive_log():\nassert (\nnp.abs(gt_mean - estimated_policy_value[key]) <= ci_bound\n), f\"OPE of {key} did not work well (absolute error is greater than 3*sigma)\"\n+\n+\n+def test_boundedness_of_slate_snipw_using_random_evaluation_policy() -> None:\n+ \"\"\"\n+ Test the boundedness of snipw estimators using synthetic bandit data and random evaluation policy\n+ \"\"\"\n+ # set parameters\n+ n_unique_action = 10\n+ len_list = 3\n+ dim_context = 2\n+ reward_type = \"binary\"\n+ random_state = 12345\n+ n_rounds = 1000\n+ reward_structure = \"standard_additive\"\n+ click_model = None\n+ behavior_policy_function = linear_behavior_policy_logit\n+ reward_function = logistic_reward_function\n+ dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=behavior_policy_function,\n+ base_reward_function=reward_function,\n+ )\n+ random_behavior_dataset = SyntheticSlateBanditDataset(\n+ n_unique_action=n_unique_action,\n+ len_list=len_list,\n+ dim_context=dim_context,\n+ reward_type=reward_type,\n+ reward_structure=reward_structure,\n+ click_model=click_model,\n+ random_state=random_state,\n+ behavior_policy_function=None,\n+ base_reward_function=reward_function,\n+ )\n+ # obtain feedback\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ slate_id = bandit_feedback[\"slate_id\"]\n+ reward = bandit_feedback[\"reward\"]\n+ # make pscore too small (to check the boundedness of snipw)\n+ pscore = bandit_feedback[\"pscore\"] ** 3\n+ pscore_item_position = bandit_feedback[\"pscore_item_position\"] ** 3\n+ pscore_cascade = bandit_feedback[\"pscore_cascade\"] ** 3\n+ position = bandit_feedback[\"position\"]\n+\n+ # obtain random behavior feedback\n+ random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(\n+ n_rounds=n_rounds\n+ )\n+\n+ # self normalized\n+ snsips_estimated_policy_value = snsips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ evaluation_policy_pscore=random_behavior_feedback[\"pscore\"],\n+ )\n+ sniips_estimated_policy_value = sniips.estimate_policy_value(\n+ slate_id=slate_id,\n+ 
reward=reward,\n+ pscore_item_position=pscore_item_position,\n+ position=position,\n+ evaluation_policy_pscore_item_position=random_behavior_feedback[\n+ \"pscore_item_position\"\n+ ],\n+ )\n+ snrips_estimated_policy_value = snrips.estimate_policy_value(\n+ slate_id=slate_id,\n+ reward=reward,\n+ pscore_cascade=pscore_cascade,\n+ position=position,\n+ evaluation_policy_pscore_cascade=random_behavior_feedback[\"pscore_cascade\"],\n+ )\n+\n+ estimated_policy_value = {\n+ \"snsips\": snsips_estimated_policy_value,\n+ \"sniips\": sniips_estimated_policy_value,\n+ \"snrips\": snrips_estimated_policy_value,\n+ }\n+ for key in estimated_policy_value:\n+ assert (\n+ estimated_policy_value[key] <= len_list\n+ ), f\"estimated policy value of snipw should be smaller than or equal to {len_list} (because of its 1-boundedness for each position), but the value is: {estimated_policy_value}\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add self normalized slate ope tests |
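The boundedness test added here exploits a basic property of self-normalized importance weighting: with binary rewards, each slot's estimate is at most 1 (so at most `len_list` per slate) no matter how small the behavior propensities get, whereas plain IPW can blow up. A small numerical check of that property, independent of obp; the propensities and rewards below are made up for illustration.

import numpy as np

rng = np.random.default_rng(12345)
n, len_list = 10_000, 3

reward = rng.binomial(1, 0.2, size=n * len_list).astype(float)
position = np.tile(np.arange(len_list), n)
# deliberately tiny behavior propensities, analogous to pscore ** 3 in the test
pscore_b = rng.uniform(0.01, 0.05, size=n * len_list) ** 3
pscore_e = rng.uniform(0.1, 1.0, size=n * len_list)

value_ipw, value_snipw = 0.0, 0.0
for pos in range(len_list):
    idx = position == pos
    iw = pscore_e[idx] / pscore_b[idx]
    value_ipw += np.mean(reward[idx] * iw)              # can be arbitrarily large
    value_snipw += np.mean(reward[idx] * iw / iw.mean())  # <= 1 per slot

print(f"IPW:   {value_ipw:.2f}")    # typically huge
print(f"SNIPW: {value_snipw:.2f}")  # always <= len_list
assert value_snipw <= len_list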
641,014 | 27.09.2021 15:35:47 | -7,200 | 5a956b322bf8df7e310a8f627445eabb95c3d2fd | fix incorrect metadata | [
{
"change_type": "MODIFY",
"old_path": "pyproject.toml",
"new_path": "pyproject.toml",
"diff": "[tool.poetry]\nname = \"obp\"\n-version = \"0.4.2\"\n+version = \"0.5.1\"\ndescription = \"Open Bandit Pipeline: a python library for bandit algorithms and off-policy evaluation\"\nauthors = [\"Yuta Saito <[email protected]>\"]\nlicense = \"Apache License 2.0\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix incorrect metadata |
641,014 | 26.10.2021 07:56:30 | 14,400 | 240a6ff429c1859e320d29a43aea587a2c5b37ef | add q-learner | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/__init__.py",
"new_path": "obp/policy/__init__.py",
"diff": "@@ -13,6 +13,7 @@ from obp.policy.logistic import LogisticTS\nfrom obp.policy.logistic import LogisticUCB\nfrom obp.policy.logistic import MiniBatchLogisticRegression\nfrom obp.policy.offline import IPWLearner\n+from obp.policy.offline import QLearner\nfrom obp.policy.offline import NNPolicyLearner\nfrom obp.policy.offline_continuous import ContinuousNNPolicyLearner\n@@ -34,5 +35,6 @@ __all__ = [\n\"MiniBatchLogisticRegression\",\n\"IPWLearner\",\n\"NNPolicyLearner\",\n+ \"QLearner\",\n\"ContinuousNNPolicyLearner\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -11,6 +11,7 @@ from typing import Union\nimport numpy as np\nfrom scipy.special import softmax\n+from sklearn.base import BaseEstimator\nfrom sklearn.base import ClassifierMixin\nfrom sklearn.base import clone\nfrom sklearn.base import is_classifier\n@@ -23,9 +24,12 @@ from torch.nn.functional import mse_loss\nimport torch.optim as optim\nfrom tqdm import tqdm\n+from ..ope import RegressionModel\nfrom ..utils import check_array\nfrom ..utils import check_bandit_feedback_inputs\nfrom ..utils import check_tensor\n+from ..utils import sample_action_fast\n+from ..utils import softmax as softmax_axis1\nfrom .base import BaseOfflinePolicyLearner\n@@ -167,7 +171,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nposition = np.zeros_like(action, dtype=int)\nelse:\nif position is None:\n- raise ValueError(\"When `self.len_list=1`, `position` must be given.\")\n+ raise ValueError(\"When `self.len_list > 1`, `position` must be given.\")\nfor position_ in np.arange(self.len_list):\nX, sample_weight, y = self._create_train_data_for_opl(\n@@ -351,12 +355,272 @@ class IPWLearner(BaseOfflinePolicyLearner):\n@dataclass\n-class NNPolicyLearner(BaseOfflinePolicyLearner):\n- \"\"\"Off-policy learner based on a neural network policy.\n+class QLearner(BaseOfflinePolicyLearner):\n+ \"\"\"Off-policy learner with Direct Method.\n+\n+ Parameters\n+ -----------\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default=1\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ base_model: BaseEstimator\n+ Machine learning model used to estimate the q function (expected reward function).\n+\n+ fitting_method: str, default='normal'\n+ Method to fit the regression model.\n+ Must be one of ['normal', 'iw'] where 'iw' stands for importance weighting.\n+\n+ \"\"\"\n+\n+ base_model: Optional[BaseEstimator] = None\n+ fitting_method: str = \"normal\"\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize class.\"\"\"\n+ super().__post_init__()\n+\n+ self.q_estimator = RegressionModel(\n+ n_actions=self.n_actions,\n+ len_list=self.len_list,\n+ base_model=self.base_model,\n+ fitting_method=self.fitting_method,\n+ )\n+\n+ def fit(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ pscore: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n+ ) -> None:\n+ \"\"\"Fits an offline bandit policy on the given logged bandit feedback data.\nNote\n--------\n- The neural network is implemented in PyTorch.\n+ This `fit` method trains an estimator for the q function :math:`\\\\q(x,a) := \\\\mathbb{E} [r \\\\mid x, a]` as follows.\n+\n+ .. 
math::\n+\n+ \\\\hat{\\\\q} \\\\in \\\\arg \\\\min_{\\\\q \\\\in \\\\Q} \\\\mathbb{E}_{n} [ \\\\ell ( r_i, q (x_i,a_i) ) ]\n+\n+ where :math:`\\\\ell` is a loss function in training the q estimator.\n+\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors in each round, i.e., :math:`x_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Position of recommendation interface where action was presented in each round of the given logged bandit data.\n+ If None is given, a learner assumes that there is only one position.\n+ When `len_list` > 1, position has to be set.\n+\n+ \"\"\"\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ pscore=pscore,\n+ position=position,\n+ )\n+ if pscore is None:\n+ n_actions = np.int(action.max() + 1)\n+ pscore = np.ones_like(action) / n_actions\n+ if self.len_list == 1:\n+ position = np.zeros_like(action, dtype=int)\n+ else:\n+ if position is None:\n+ raise ValueError(\"When `self.len_list > 1`, `position` must be given.\")\n+\n+ unif_action_dist = np.ones((context.shape[0], self.n_actions, self.len_list))\n+ self.q_estimator.fit(\n+ context=context,\n+ action=action,\n+ reward=reward,\n+ position=position,\n+ pscore=pscore,\n+ action_dist=unif_action_dist,\n+ )\n+\n+ def predict(\n+ self,\n+ context: np.ndarray,\n+ tau: Union[int, float] = 1.0,\n+ ) -> np.ndarray:\n+ \"\"\"Predict best actions for new data deterministically.\n+\n+ Note\n+ --------\n+ This `predict` method predicts the best actions for new data deterministically as follows.\n+\n+ .. 
math::\n+\n+ \\\\hat{a}_i \\\\in \\\\arg \\\\max_{a \\\\in \\\\mathcal{A}} \\\\hat{q}(x_i, a)\n+\n+ where :math:`\\\\hat{q}(x,a)` is an estimator for the q function :math:`\\\\q(x,a) := \\\\mathbb{E} [r \\\\mid x, a]`.\n+ Note that the action set predicted by this `predict` method can contain duplicate items.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\n+\n+ Returns\n+ -----------\n+ action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Deterministic action choices by the QLearner.\n+ The output can contain duplicate items (when `len_list > 2`).\n+\n+ \"\"\"\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ check_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\n+\n+ q_hat = self.predict_score(context=context)\n+ q_hat_argmax = np.argmax(q_hat, axis=1).astype(int)\n+\n+ n_rounds = context.shape[0]\n+ action_dist = np.zeros_like(q_hat)\n+ for p in np.arange(self.len_list):\n+ action_dist[\n+ np.arange(n_rounds),\n+ q_hat_argmax[:, p],\n+ np.ones(n_rounds, dtype=int) * p,\n+ ] = 1\n+ return action_dist\n+\n+ def predict_score(self, context: np.ndarray) -> np.ndarray:\n+ \"\"\"Predict the expected reward for all possible products of action and position.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\n+\n+ Returns\n+ -----------\n+ q_hat: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Expected reward for all possible pairs of action and position. :math:`\\\\hat{q}(x,a)`.\n+\n+ \"\"\"\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+\n+ q_hat = self.q_estimator.predict(context=context)\n+ return q_hat\n+\n+ def sample_action(\n+ self,\n+ context: np.ndarray,\n+ tau: Union[int, float] = 1.0,\n+ random_state: Optional[int] = None,\n+ ) -> np.ndarray:\n+ \"\"\"Sample actions based on the estimated expected rewards.\n+\n+ Note\n+ --------\n+ This `sample_action` method samples a set of actions for new data based on :math:`\\\\hat{q}` as follows.\n+\n+ .. 
math::\n+\n+ \\\\pi (a | x) = \\\\frac{\\\\mathrm{exp}( \\\\hat{q}(x,a) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} \\\\mathrm{exp}( \\\\hat{q}(x,a^{\\\\prime}) / \\\\tau)}\n+\n+ :math:`\\\\tau` is a temperature hyperparameter.\n+ :math:`\\\\hat{q}: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\times \\\\mathcal{K} \\\\rightarrow \\\\mathbb{R}_{+}`\n+ is a q function estimator, which is now implemented in the `predict_score` method.\n+\n+ Parameters\n+ ----------------\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\n+\n+ tau: int or float, default=1.0\n+ A temperature parameter, controlling the randomness of the action choice.\n+ As :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling actions.\n+\n+ Returns\n+ -----------\n+ action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Action sampled based on the estimated expected rewards.\n+\n+ \"\"\"\n+ base_action_dist = self.predict_proba(context=context, tau=tau)\n+\n+ n_rounds = context.shape[0]\n+ action_dist = np.zeros_like(base_action_dist)\n+ for p in np.arange(self.len_list):\n+ sampled_action = sample_action_fast(\n+ base_action_dist[:, :, p], random_state=random_state\n+ )\n+ action_dist[\n+ np.arange(n_rounds),\n+ sampled_action,\n+ np.ones(n_rounds, dtype=int) * p,\n+ ] = 1\n+\n+ return action_dist\n+\n+ def predict_proba(\n+ self,\n+ context: np.ndarray,\n+ tau: Union[int, float] = 1.0,\n+ ) -> np.ndarray:\n+ \"\"\"Obtains action choice probabilities for new data based on the estimated expected rewards.\n+\n+ Note\n+ --------\n+ This `predict_proba` method obtains action choice probabilities for new data based on :math:`\\\\hat{q}` as follows.\n+\n+ .. math::\n+\n+ \\\\pi (a | x) = \\\\frac{\\\\mathrm{exp}( \\\\hat{q}(x,a) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} \\\\mathrm{exp}( \\\\hat{q}(x,a^{\\\\prime}) / \\\\tau)}\n+\n+ :math:`\\\\tau` is a temperature hyperparameter.\n+ :math:`\\\\hat{q}: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\times \\\\mathcal{K} \\\\rightarrow \\\\mathbb{R}_{+}`\n+ is a q function estimator, which is now implemented in the `predict_score` method.\n+\n+ Parameters\n+ ----------------\n+ context: array-like, shape (n_rounds_of_new_data, dim_context)\n+ Context vectors for new data.\n+\n+ tau: int or float, default=1.0\n+ A temperature parameter, controlling the randomness of the action choice.\n+ As :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n+\n+ Returns\n+ -----------\n+ action_dist: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Action choice probabilities obtained from the estimated expected rewards.\n+\n+ \"\"\"\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ check_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\n+\n+ q_hat = self.predict_score(context=context)\n+ action_dist = softmax_axis1(q_hat / tau)\n+ return action_dist\n+\n+\n+@dataclass\n+class NNPolicyLearner(BaseOfflinePolicyLearner):\n+ \"\"\"Off-policy learner based on a neural network policy.\nParameters\n-----------\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add q-learner |
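QLearner's `predict_proba` turns the estimated q function into a stochastic policy by applying a temperature-controlled softmax over actions. A minimal sketch of that conversion; the `q_hat` values and the `softmax_over_actions` helper are illustrative stand-ins for the regression model's output and the axis-1 softmax utility, not the obp implementation itself.

import numpy as np


def softmax_over_actions(x: np.ndarray) -> np.ndarray:
    # numerically stable softmax along axis 1 (the action axis)
    e = np.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)


rng = np.random.default_rng(0)
n_rounds, n_actions, len_list = 5, 4, 1

# stand-in for the fitted q estimator's predictions, q_hat(x, a)
q_hat = rng.uniform(size=(n_rounds, n_actions, len_list))

tau = 0.5  # smaller tau -> closer to argmax; larger tau -> closer to uniform
action_dist = softmax_over_actions(q_hat / tau)

assert np.allclose(action_dist.sum(axis=1), 1.0)
print(action_dist[0, :, 0])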
641,014 | 26.10.2021 16:43:09 | 14,400 | fdd4ca2814cc91720efaf6190e5753e7a36f61eb | unify some test funcs | [
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline.py",
"new_path": "tests/policy/test_offline.py",
"diff": "@@ -5,8 +5,8 @@ from sklearn.linear_model import LogisticRegression\nimport torch\nfrom obp.policy.offline import IPWLearner\n-from obp.policy.offline import QLearner\nfrom obp.policy.offline import NNPolicyLearner\n+from obp.policy.offline import QLearner\nfrom obp.policy.policy_type import PolicyType\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline_learner_performance.py",
"new_path": "tests/policy/test_offline_learner_performance.py",
"diff": "@@ -81,7 +81,7 @@ offline_experiment_configurations = [\n\"random_forest\",\n),\n(\n- 400,\n+ 800,\n10,\n10,\n\"lightgbm\",\n@@ -142,7 +142,7 @@ class UniformSampleWeightLearner(BaseOfflinePolicyLearner):\n) -> None:\nif pscore is None:\n- n_actions = np.int(action.max() + 1)\n+ n_actions = np.int32(action.max() + 1)\npscore = np.ones_like(action) / n_actions\nif position is None or self.len_list == 1:\nposition = np.zeros_like(action, dtype=int)\n@@ -176,7 +176,7 @@ class UniformSampleWeightLearner(BaseOfflinePolicyLearner):\n\"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\noffline_experiment_configurations,\n)\n-def test_offline_ipwlearner_performance(\n+def test_offline_policy_learner_performance(\nn_rounds: int,\nn_actions: int,\ndim_context: int,\n@@ -192,13 +192,28 @@ def test_offline_ipwlearner_performance(\nbehavior_policy_function=linear_behavior_policy,\nrandom_state=i,\n)\n- # define evaluation policy using IPWLearner\n+ # sample new training and test sets of synthetic logged bandit feedback\n+ bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+\n+ # defining policy learners\nipw_policy = IPWLearner(\nn_actions=dataset.n_actions,\nbase_classifier=base_model_dict[base_model_for_evaluation_policy](\n**hyperparams[base_model_for_evaluation_policy]\n),\n)\n+ q_policy = QLearner(\n+ n_actions=dataset.n_actions,\n+ base_model=base_model_dict[base_model_for_evaluation_policy](\n+ **hyperparams[base_model_for_evaluation_policy]\n+ ),\n+ )\n+ nn_policy = NNPolicyLearner(\n+ n_actions=dataset.n_actions,\n+ dim_context=dim_context,\n+ off_policy_objective=\"ipw\",\n+ )\n# baseline method 1. RandomPolicy\nrandom_policy = RandomPolicy(n_actions=dataset.n_actions)\n# baseline method 2. 
UniformSampleWeightLearner\n@@ -208,175 +223,20 @@ def test_offline_ipwlearner_performance(\n**hyperparams[base_model_for_evaluation_policy]\n),\n)\n- # sample new training and test sets of synthetic logged bandit feedback\n- bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- # train the evaluation policy on the training set of the synthetic logged bandit feedback\n+\n+ # policy training\nipw_policy.fit(\ncontext=bandit_feedback_train[\"context\"],\naction=bandit_feedback_train[\"action\"],\nreward=bandit_feedback_train[\"reward\"],\npscore=bandit_feedback_train[\"pscore\"],\n)\n- uniform_sample_weight_policy.fit(\n- context=bandit_feedback_train[\"context\"],\n- action=bandit_feedback_train[\"action\"],\n- reward=bandit_feedback_train[\"reward\"],\n- pscore=bandit_feedback_train[\"pscore\"],\n- )\n- # predict the action decisions for the test set of the synthetic logged bandit feedback\n- ipw_action_dist = ipw_policy.predict(\n- context=bandit_feedback_test[\"context\"],\n- )\n- random_action_dist = random_policy.predict(\n- context=bandit_feedback_test[\"context\"],\n- )\n- uniform_sample_weight_action_dist = uniform_sample_weight_policy.predict(\n- context=bandit_feedback_test[\"context\"],\n- )\n- # get the ground truth policy value for each learner\n- gt_ipw_learner = dataset.calc_ground_truth_policy_value(\n- expected_reward=bandit_feedback_test[\"expected_reward\"],\n- action_dist=ipw_action_dist,\n- )\n- gt_random_policy = dataset.calc_ground_truth_policy_value(\n- expected_reward=bandit_feedback_test[\"expected_reward\"],\n- action_dist=random_action_dist,\n- )\n- gt_uniform_sample_weight_learner = dataset.calc_ground_truth_policy_value(\n- expected_reward=bandit_feedback_test[\"expected_reward\"],\n- action_dist=uniform_sample_weight_action_dist,\n- )\n-\n- return gt_ipw_learner, gt_random_policy, gt_uniform_sample_weight_learner\n-\n- n_runs = 10\n- processed = Parallel(\n- n_jobs=-1,\n- verbose=0,\n- )([delayed(process)(i) for i in np.arange(n_runs)])\n- list_gt_ipw, list_gt_random, list_gt_uniform = [], [], []\n- for i, ground_truth_policy_values in enumerate(processed):\n- gt_ipw, gt_random, gt_uniform = ground_truth_policy_values\n- list_gt_ipw.append(gt_ipw)\n- list_gt_random.append(gt_random)\n- list_gt_uniform.append(gt_uniform)\n-\n- assert np.mean(list_gt_ipw) > np.mean(list_gt_random)\n- assert np.mean(list_gt_ipw) > np.mean(list_gt_uniform)\n-\n-\[email protected](\n- \"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\n- offline_experiment_configurations,\n-)\n-def test_offline_qlearner_performance(\n- n_rounds: int,\n- n_actions: int,\n- dim_context: int,\n- base_model_for_evaluation_policy: str,\n- base_model_for_reg_model: str,\n-) -> None:\n- def process(i: int):\n- # synthetic data generator\n- dataset = SyntheticBanditDataset(\n- n_actions=n_actions,\n- dim_context=dim_context,\n- reward_function=logistic_reward_function,\n- behavior_policy_function=linear_behavior_policy,\n- random_state=i,\n- )\n- # define evaluation policy using IPWLearner\n- q_policy = QLearner(\n- n_actions=dataset.n_actions,\n- base_model=base_model_dict[base_model_for_evaluation_policy](\n- **hyperparams[base_model_for_evaluation_policy]\n- ),\n- )\n- # baseline method 1. 
RandomPolicy\n- random_policy = RandomPolicy(n_actions=dataset.n_actions)\n- # sample new training and test sets of synthetic logged bandit feedback\n- bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- # train the evaluation policy on the training set of the synthetic logged bandit feedback\nq_policy.fit(\ncontext=bandit_feedback_train[\"context\"],\naction=bandit_feedback_train[\"action\"],\nreward=bandit_feedback_train[\"reward\"],\npscore=bandit_feedback_train[\"pscore\"],\n)\n- # predict the action decisions for the test set of the synthetic logged bandit feedback\n- q_action_dist = q_policy.predict(\n- context=bandit_feedback_test[\"context\"],\n- )\n- random_action_dist = random_policy.predict(\n- context=bandit_feedback_test[\"context\"],\n- )\n- # get the ground truth policy value for each learner\n- gt_q_learner = dataset.calc_ground_truth_policy_value(\n- expected_reward=bandit_feedback_test[\"expected_reward\"],\n- action_dist=q_action_dist,\n- )\n- gt_random_policy = dataset.calc_ground_truth_policy_value(\n- expected_reward=bandit_feedback_test[\"expected_reward\"],\n- action_dist=random_action_dist,\n- )\n-\n- return gt_q_learner, gt_random_policy\n-\n- n_runs = 10\n- processed = Parallel(\n- n_jobs=-1,\n- verbose=0,\n- )([delayed(process)(i) for i in np.arange(n_runs)])\n- list_gt_q, list_gt_random = [], []\n- for i, ground_truth_policy_values in enumerate(processed):\n- gt_q, gt_random = ground_truth_policy_values\n- list_gt_q.append(gt_q)\n- list_gt_random.append(gt_random)\n-\n- assert np.mean(list_gt_q) > np.mean(list_gt_random)\n-\n-\[email protected](\n- \"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\n- offline_experiment_configurations,\n-)\n-def test_offline_nn_policy_learner_performance(\n- n_rounds: int,\n- n_actions: int,\n- dim_context: int,\n- base_model_for_evaluation_policy: str,\n- base_model_for_reg_model: str,\n-) -> None:\n- def process(i: int):\n- # synthetic data generator\n- dataset = SyntheticBanditDataset(\n- n_actions=n_actions,\n- dim_context=dim_context,\n- reward_function=logistic_reward_function,\n- behavior_policy_function=linear_behavior_policy,\n- random_state=i,\n- )\n- # define evaluation policy using NNPolicyLearner\n- nn_policy = NNPolicyLearner(\n- n_actions=dataset.n_actions,\n- dim_context=dim_context,\n- off_policy_objective=\"ipw\",\n- )\n- # baseline method 1. RandomPolicy\n- random_policy = RandomPolicy(n_actions=dataset.n_actions)\n- # baseline method 2. 
UniformSampleWeightLearner\n- uniform_sample_weight_policy = UniformSampleWeightLearner(\n- n_actions=dataset.n_actions,\n- base_classifier=base_model_dict[base_model_for_evaluation_policy](\n- **hyperparams[base_model_for_evaluation_policy]\n- ),\n- )\n- # sample new training and test sets of synthetic logged bandit feedback\n- bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- # train the evaluation policy on the training set of the synthetic logged bandit feedback\nnn_policy.fit(\ncontext=bandit_feedback_train[\"context\"],\naction=bandit_feedback_train[\"action\"],\n@@ -389,8 +249,15 @@ def test_offline_nn_policy_learner_performance(\nreward=bandit_feedback_train[\"reward\"],\npscore=bandit_feedback_train[\"pscore\"],\n)\n- # predict the action decisions for the test set of the synthetic logged bandit feedback\n- nn_policy_action_dist = nn_policy.predict(\n+\n+ # prediction/making decisions\n+ ipw_action_dist = ipw_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ q_action_dist = q_policy.predict(\n+ context=bandit_feedback_test[\"context\"],\n+ )\n+ nn_action_dist = nn_policy.predict(\ncontext=bandit_feedback_test[\"context\"],\n)\nrandom_action_dist = random_policy.predict(\n@@ -399,10 +266,19 @@ def test_offline_nn_policy_learner_performance(\nuniform_sample_weight_action_dist = uniform_sample_weight_policy.predict(\ncontext=bandit_feedback_test[\"context\"],\n)\n- # get the ground truth policy value for each learner\n- gt_nn_policy_learner = dataset.calc_ground_truth_policy_value(\n+\n+ # evaluation\n+ gt_ipw_learner = dataset.calc_ground_truth_policy_value(\nexpected_reward=bandit_feedback_test[\"expected_reward\"],\n- action_dist=nn_policy_action_dist,\n+ action_dist=ipw_action_dist,\n+ )\n+ gt_q_learner = dataset.calc_ground_truth_policy_value(\n+ expected_reward=bandit_feedback_test[\"expected_reward\"],\n+ action_dist=q_action_dist,\n+ )\n+ gt_nn_learner = dataset.calc_ground_truth_policy_value(\n+ expected_reward=bandit_feedback_test[\"expected_reward\"],\n+ action_dist=nn_action_dist,\n)\ngt_random_policy = dataset.calc_ground_truth_policy_value(\nexpected_reward=bandit_feedback_test[\"expected_reward\"],\n@@ -413,19 +289,46 @@ def test_offline_nn_policy_learner_performance(\naction_dist=uniform_sample_weight_action_dist,\n)\n- return gt_nn_policy_learner, gt_random_policy, gt_uniform_sample_weight_learner\n+ return (\n+ gt_ipw_learner,\n+ gt_q_learner,\n+ gt_nn_learner,\n+ gt_random_policy,\n+ gt_uniform_sample_weight_learner,\n+ )\nn_runs = 10\nprocessed = Parallel(\n- n_jobs=1, # PyTorch uses multiple threads\n+ n_jobs=-1,\nverbose=0,\n)([delayed(process)(i) for i in np.arange(n_runs)])\n- list_gt_nn_policy, list_gt_random, list_gt_uniform = [], [], []\n- for i, ground_truth_policy_values in enumerate(processed):\n- gt_nn_policy, gt_random, gt_uniform = ground_truth_policy_values\n- list_gt_nn_policy.append(gt_nn_policy)\n+ list_gt_ipw = list()\n+ list_gt_q = list()\n+ list_gt_nn = list()\n+ list_gt_random = list()\n+ list_gt_unif_ipw = list()\n+ for i, gt_policy_values in enumerate(processed):\n+ gt_ipw, gt_q, gt_nn, gt_random, gt_unif_ipw = gt_policy_values\n+ list_gt_ipw.append(gt_ipw)\n+ list_gt_q.append(gt_q)\n+ list_gt_nn.append(gt_nn)\nlist_gt_random.append(gt_random)\n- list_gt_uniform.append(gt_uniform)\n+ list_gt_unif_ipw.append(gt_unif_ipw)\n- assert np.mean(list_gt_nn_policy) > np.mean(list_gt_random)\n- assert 
np.mean(list_gt_nn_policy) > np.mean(list_gt_uniform)\n+ # baseline learner performance\n+ print(f\"Performance of Random is {np.mean(list_gt_random)}\")\n+ print(\n+ f\"Performance of IPWLearner with Uniform Weight is {np.mean(list_gt_unif_ipw)}\"\n+ )\n+ # ipw learner performance\n+ print(f\"Performance of IPWLearner is {np.mean(list_gt_ipw)}\")\n+ assert np.mean(list_gt_ipw) > np.mean(list_gt_random)\n+ assert np.mean(list_gt_ipw) > np.mean(list_gt_unif_ipw)\n+ # q learner performance\n+ print(f\"Performance of QLearner is {np.mean(list_gt_q)}\")\n+ assert np.mean(list_gt_q) > np.mean(list_gt_random)\n+ assert np.mean(list_gt_q) > np.mean(list_gt_unif_ipw)\n+ # nn policy learner performance\n+ print(f\"Performance of NNPolicyLearner is {np.mean(list_gt_nn)}\")\n+ assert np.mean(list_gt_nn) > np.mean(list_gt_random)\n+ assert np.mean(list_gt_nn) > np.mean(list_gt_unif_ipw)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | unify some test funcs |
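The unified test evaluates all policy learners across repeated seeds with joblib and then compares the averaged ground-truth values. Its skeleton looks roughly as follows; the `process` body here is a placeholder returning dummy values, not the actual experiment.

import numpy as np
from joblib import Parallel, delayed


def process(seed: int):
    # placeholder for one experiment: train the learners on data generated
    # with `seed` and return their ground-truth policy values
    rng = np.random.default_rng(seed)
    gt_ipw = rng.uniform(0.6, 0.8)
    gt_random = rng.uniform(0.4, 0.6)
    return gt_ipw, gt_random


n_runs = 10
processed = Parallel(n_jobs=-1, verbose=0)(
    [delayed(process)(i) for i in np.arange(n_runs)]
)
list_gt_ipw, list_gt_random = map(list, zip(*processed))
assert np.mean(list_gt_ipw) > np.mean(list_gt_random)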
641,014 | 29.10.2021 23:53:31 | 14,400 | d5d107003f11ed37d1e4ff834cee02137e085278 | implement the Gumbel softmax trick to sample rankings of actions | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -29,7 +29,6 @@ from obp.ope import RegressionModel\nfrom ..utils import check_array\nfrom ..utils import check_bandit_feedback_inputs\nfrom ..utils import check_tensor\n-from ..utils import sample_action_fast\nfrom ..utils import softmax as softmax_axis1\nfrom .base import BaseOfflinePolicyLearner\n@@ -251,25 +250,23 @@ class IPWLearner(BaseOfflinePolicyLearner):\ntau: Union[int, float] = 1.0,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Sample (non-repetitive) actions based on scores predicted by a classifier.\n+ \"\"\"Sample a ranking of (non-repetitive) actions from the Plackett-Luce ranking distribution.\nNote\n--------\n- This `sample_action` method samples a **non-repetitive** set of actions for new data :math:`x \\\\in \\\\mathcal{X}`\n- by first computing non-negative scores for all possible candidate products of action and position\n- :math:`(a, k) \\\\in \\\\mathcal{A} \\\\times \\\\mathcal{K}` (where :math:`\\\\mathcal{A}` is an action set and\n- :math:`\\\\mathcal{K}` is a position set), and using softmax function as follows:\n+ This `sample_action` method samples a **non-repetitive** ranking of actions for new data\n+ :math:`x \\\\in \\\\mathcal{X}` via the so-colled \"Gumbel Softmax trick\" as follows.\n.. math::\n- & P (A_1 = a_1 | x) = \\\\frac{\\\\mathrm{exp}(f(x,a_1,1) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} \\\\mathrm{exp}( f(x,a^{\\\\prime},1) / \\\\tau)} , \\\\\\\\\n- & P (A_2 = a_2 | A_1 = a_1, x) = \\\\frac{\\\\mathrm{exp}(f(x,a_2,2) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A} \\\\backslash \\\\{a_1\\\\}} \\\\mathrm{exp}(f(x,a^{\\\\prime},2) / \\\\tau )} ,\n- \\\\ldots\n+ \\\\s (x,a) = \\\\hat{f}(x,a) / \\\\tau + \\\\gamma_{x,a}, \\\\quad \\\\gamma_{x,a} \\\\sim \\\\mathrm{Gumbel}(0,1)\n- where :math:`A_k` is a random variable representing an action at a position :math:`k`.\n:math:`\\\\tau` is a temperature hyperparameter.\n:math:`f: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\times \\\\mathcal{K} \\\\rightarrow \\\\mathbb{R}_{+}`\nis a scoring function which is now implemented in the `predict_score` method.\n+ :math:`\\\\gamma_{x,a}` is a random variable sampled from the Gumbel distribution.\n+ By sorting the actions based on :math:`\\\\s (x,a)` for each context, we can sample a ranking from\n+ the Plackett-Luce ranking distribution.\nParameters\n----------------\n@@ -286,7 +283,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nReturns\n-----------\naction: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n- Action sampled by a trained classifier.\n+ Ranking of actions sampled by the Gumbel softmax trick as follows.\n\"\"\"\ncheck_array(array=context, name=\"context\", expected_dim=2)\n@@ -294,16 +291,13 @@ class IPWLearner(BaseOfflinePolicyLearner):\nn_rounds = context.shape[0]\nrandom_ = check_random_state(random_state)\n- action = np.zeros((n_rounds, self.n_actions, self.len_list))\n- score_predicted = self.predict_score(context=context)\n- for i in tqdm(np.arange(n_rounds), desc=\"[sample_action]\", total=n_rounds):\n- action_set = np.arange(self.n_actions)\n+ sampled_action = np.zeros((n_rounds, self.n_actions, self.len_list))\n+ scores = self.predict_score(context=context).mean(2) / tau\n+ scores += random_.gumbel(size=scores.shape)\n+ ranking = np.argsort(-scores, axis=1)\nfor position_ in np.arange(self.len_list):\n- score_ = softmax(score_predicted[i, action_set, position_] / tau)\n- action_sampled = random_.choice(action_set, p=score_, replace=False)\n- action[i, action_sampled, 
position_] = 1\n- action_set = np.delete(action_set, action_set == action_sampled)\n- return action\n+ sampled_action[np.arange(n_rounds), ranking[:, position_], position_] = 1\n+ return sampled_action\ndef predict_proba(\nself,\n@@ -315,9 +309,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\nNote\n--------\nThis `predict_proba` method obtains action choice probabilities for new data :math:`x \\\\in \\\\mathcal{X}`\n- by first computing non-negative scores for all possible candidate actions\n- :math:`a \\\\in \\\\mathcal{A}` (where :math:`\\\\mathcal{A}` is an action set),\n- and using a Plackett-Luce ranking model as follows:\n+ by applying the softmax function as follows:\n.. math::\n@@ -476,6 +468,9 @@ class QLearner(BaseOfflinePolicyLearner):\nwhere :math:`\\\\hat{q}(x,a)` is an estimator for the q function :math:`\\\\q(x,a) := \\\\mathbb{E} [r \\\\mid x, a]`.\nNote that the action set predicted by this `predict` method can contain duplicate items.\n+ Action set predicted by this `predict` method can contain duplicate items.\n+ If you want a non-repetitive action set, then please use the `sample_action` method.\n+\nParameters\n-----------\ncontext: array-like, shape (n_rounds_of_new_data, dim_context)\n@@ -529,19 +524,23 @@ class QLearner(BaseOfflinePolicyLearner):\ntau: Union[int, float] = 1.0,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Sample actions based on the estimated expected rewards.\n+ \"\"\"Sample a ranking of (non-repetitive) actions from the Plackett-Luce ranking distribution.\nNote\n--------\n- This `sample_action` method samples a set of actions for new data based on :math:`\\\\hat{q}` as follows.\n+ This `sample_action` method samples a ranking of (non-repetitive) actions for new data\n+ based on :math:`\\\\hat{q}` and the so-colled \"Gumbel Softmax trick\" as follows.\n.. 
math::\n- \\\\pi (a | x) = \\\\frac{\\\\mathrm{exp}( \\\\hat{q}(x,a) / \\\\tau)}{\\\\sum_{a^{\\\\prime} \\\\in \\\\mathcal{A}} \\\\mathrm{exp}( \\\\hat{q}(x,a^{\\\\prime}) / \\\\tau)}\n+ \\\\s (x,a) = \\\\hat{q}(x,a) / \\\\tau + \\\\gamma_{x,a}, \\\\quad \\\\gamma_{x,a} \\\\sim \\\\mathrm{Gumbel}(0,1)\n:math:`\\\\tau` is a temperature hyperparameter.\n:math:`\\\\hat{q}: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\times \\\\mathcal{K} \\\\rightarrow \\\\mathbb{R}_{+}`\nis a q function estimator, which is now implemented in the `predict_score` method.\n+ :math:`\\\\gamma_{x,a}` is a random variable sampled from the Gumbel distribution.\n+ By sorting the actions based on :math:`\\\\s (x,a)` for each context, we can sample a ranking from\n+ the Plackett-Luce ranking distribution.\nParameters\n----------------\n@@ -557,25 +556,22 @@ class QLearner(BaseOfflinePolicyLearner):\nReturns\n-----------\n- action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n- Action sampled based on the estimated expected rewards.\n+ sampled_action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Ranking of action sampled from the Plackett-Luce ranking distribution by the Gumbel softmax trick.\n\"\"\"\n- base_action_dist = self.predict_proba(context=context, tau=tau)\n+ check_array(array=context, name=\"context\", expected_dim=2)\n+ check_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\nn_rounds = context.shape[0]\n- action_dist = np.zeros_like(base_action_dist)\n- for p in np.arange(self.len_list):\n- sampled_action = sample_action_fast(\n- base_action_dist[:, :, p], random_state=random_state\n- )\n- action_dist[\n- np.arange(n_rounds),\n- sampled_action,\n- np.ones(n_rounds, dtype=int) * p,\n- ] = 1\n-\n- return action_dist\n+ random_ = check_random_state(random_state)\n+ sampled_action = np.zeros((n_rounds, self.n_actions, self.len_list))\n+ scores = self.predict_score(context=context).mean(2) / tau\n+ scores += random_.gumbel(size=scores.shape)\n+ ranking = np.argsort(-scores, axis=1)\n+ for position_ in np.arange(self.len_list):\n+ sampled_action[np.arange(n_rounds), ranking[:, position_], position_] = 1\n+ return sampled_action\ndef predict_proba(\nself,\n@@ -631,7 +627,6 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nlen_list: int, default=1\nLength of a list of actions recommended in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- Currently, len_list > 1 is not supported.\ndim_context: int\nNumber of dimensions of context vectors.\n@@ -770,9 +765,6 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n\"\"\"Initialize class.\"\"\"\nsuper().__post_init__()\n- if self.len_list != 1:\n- raise NotImplementedError(\"currently, len_list > 1 is not supported\")\n-\ncheck_scalar(self.dim_context, \"dim_context\", int, min_val=1)\nif self.off_policy_objective not in [\"dm\", \"ipw\", \"dr\"]:\n@@ -1044,8 +1036,6 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\npscore = np.ones_like(action) / self.n_actions\nif self.len_list == 1:\nposition = np.zeros_like(action, dtype=int)\n- else:\n- raise NotImplementedError(\"currently, len_list > 1 is not supported\")\n# train q function estimator when it is needed to train NNPolicy\nif self.off_policy_objective != \"ipw\":\n@@ -1291,25 +1281,47 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\ndef sample_action(\nself,\ncontext: np.ndarray,\n+ tau: Union[int, float] = 1.0,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Sample (non-repetitive) actions based on action choice 
probabilities.\n+ \"\"\"Sample a ranking of (non-repetitive) actions from the Plackett-Luce ranking distribution.\n+\n+ Note\n+ --------\n+ This `sample_action` method samples a **non-repetitive** ranking of actions for new data\n+ :math:`x \\\\in \\\\mathcal{X}` via the so-colled \"Gumbel Softmax trick\" as follows.\n+\n+ .. math::\n+\n+ \\\\s (x,a) = \\\\hat{f}(x,a) / \\\\tau + \\\\gamma_{x,a}, \\\\quad \\\\gamma_{x,a} \\\\sim \\\\mathrm{Gumbel}(0,1)\n+\n+ :math:`\\\\tau` is a temperature hyperparameter.\n+ :math:`f: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\times \\\\mathcal{K} \\\\rightarrow \\\\mathbb{R}_{+}`\n+ is a scoring function which is now implemented in the `predict_score` method.\n+ :math:`\\\\gamma_{x,a}` is a random variable sampled from the Gumbel distribution.\n+ By sorting the actions based on :math:`\\\\s (x,a)` for each context, we can sample a ranking from\n+ the Plackett-Luce ranking distribution.\nParameters\n----------------\ncontext: array-like, shape (n_rounds_of_new_data, dim_context)\nContext vectors for new data.\n+ tau: int or float, default=1.0\n+ A temperature parameter, controlling the randomness of the action choice.\n+ As :math:`\\\\tau \\\\rightarrow \\\\infty`, the algorithm will select arms uniformly at random.\n+\nrandom_state: int, default=None\nControls the random seed in sampling actions.\nReturns\n-----------\n- action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n- Action sampled by a trained classifier.\n+ sampled_action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)\n+ Ranking of action sampled from the Plackett-Luce ranking distribution by the Gumbel softmax trick.\n\"\"\"\ncheck_array(array=context, name=\"context\", expected_dim=2)\n+ check_scalar(tau, name=\"tau\", target_type=(int, float), min_val=0)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, but found it False\"\n@@ -1317,16 +1329,13 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nn_rounds = context.shape[0]\nrandom_ = check_random_state(random_state)\n- action = np.zeros((n_rounds, self.n_actions, self.len_list))\n- score_predicted = self.predict_proba(context=context)\n- for i in tqdm(np.arange(n_rounds), desc=\"[sample_action]\", total=n_rounds):\n- action_set = np.arange(self.n_actions)\n+ sampled_action = np.zeros((n_rounds, self.n_actions, self.len_list))\n+ scores = self.predict_proba(context=context).mean(2) / tau\n+ scores += random_.gumbel(size=scores.shape)\n+ ranking = np.argsort(-scores, axis=1)\nfor position_ in np.arange(self.len_list):\n- score_ = score_predicted[i, action_set, position_]\n- action_sampled = random_.choice(action_set, p=score_, replace=False)\n- action[i, action_sampled, position_] = 1\n- action_set = np.delete(action_set, action_set == action_sampled)\n- return action\n+ sampled_action[np.arange(n_rounds), ranking[:, position_], position_] = 1\n+ return sampled_action\ndef predict_proba(\nself,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement the Gumbel softmax trick to sample rankings of actions |
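The diff above replaces the per-round sequential softmax sampling with the Gumbel-top-k trick: add i.i.d. Gumbel(0,1) noise to the temperature-scaled scores and sort actions per context. The snippet below is a minimal, self-contained sketch of that sampling step only; the random `scores` stand in for `predict_score(context).mean(2)`, and the sizes (`n_rounds`, `n_actions`, `len_list`) are arbitrary toy values, not anything taken from the repository.

```python
import numpy as np
from sklearn.utils import check_random_state

n_rounds, n_actions, len_list, tau = 5, 4, 3, 1.0
random_ = check_random_state(12345)

# stand-in for predict_score(context).mean(2): one score per (round, action)
scores = random_.normal(size=(n_rounds, n_actions)) / tau
scores += random_.gumbel(size=scores.shape)  # Gumbel(0,1) perturbation
ranking = np.argsort(-scores, axis=1)        # actions sorted per round

sampled_action = np.zeros((n_rounds, n_actions, len_list))
for position_ in np.arange(len_list):
    sampled_action[np.arange(n_rounds), ranking[:, position_], position_] = 1

# exactly one action per position, and no action repeated within a ranking
assert np.all(sampled_action.sum(axis=1) == 1)
assert np.all(sampled_action.sum(axis=2) <= 1)
```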
641,014 | 10.11.2021 21:04:00 | 18,000 | a67356b2471c86d1f04a3594fabb8b2e65007c1b | modify reward and behavior policy functions | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -8,6 +8,7 @@ from typing import Optional\nimport numpy as np\nfrom scipy.stats import truncnorm\n+from sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils import check_scalar\n@@ -30,7 +31,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nwe have different bandit samples with the same setting.\nThis can be used to estimate confidence intervals of the performances of OPE estimators.\n- If None is set as `behavior_policy_function`, the synthetic data will be context-free bandit feedback.\n+ If None is given as `behavior_policy_function`, the synthetic data will be context-free bandit feedback.\nParameters\n-----------\n@@ -49,7 +50,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nreward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\nFunction generating expected reward for each given action-context pair,\ni.e., :math:`\\\\mu: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n- If None is set, context **independent** expected reward for each action will be\n+ If None is given, context **independent** expected reward for each action will be\nsampled from the uniform distribution automatically.\nreward_std: float, default=1.0\n@@ -57,15 +58,21 @@ class SyntheticBanditDataset(BaseBanditDataset):\nA larger value leads to a noisy reward distribution.\nThis argument is valid only when `reward_type=\"continuous\"`.\n+ action_context: np.ndarray, default=None\n+ Vector representation of (discrete) actions.\n+ If None is given, one-hot representation will be used.\n+\nbehavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\n- Function generating probability distribution over action space,\n- i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n- If None is set, context **independent** uniform distribution will be used (uniform random behavior policy).\n+ Function generating logit values, which will be used to generate behavior policy via softmax transformation.\n+ If None is given, behavior policy will be generated by applying the softmax function to the expected reward.\n+ Thus, in this case, it is possible to control the optimality of the behavior policy by customizing `beta`.\n+ If `beta` is large, the behavior policy is near-optimal,\n+ while a small `beta` leads to a sub-optimal behavior policy.\n- tau: float, default=1.0\n- A temperature hyperparameer which controls the behavior policy.\n- A large value leads to a near-uniform behavior policy,\n- while a small value leads to a near-deterministic behavior policy.\n+ beta: float, default=1.0\n+ Inverse temperature parameter, which controls the entropy of behavior policy.\n+ A large value leads to a near-deterministic behavior policy,\n+ while a small value leads to a near-uniform behavior policy.\nrandom_state: int, default=12345\nControls the random seed in sampling synthetic bandit dataset.\n@@ -142,10 +149,11 @@ class SyntheticBanditDataset(BaseBanditDataset):\nreward_type: str = RewardType.BINARY.value\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\nreward_std: float = 1.0\n+ action_context: Optional[np.ndarray] = None\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n] = None\n- tau: float = 1.0\n+ beta: float = 1.0\nrandom_state: int = 12345\ndataset_name: str = \"synthetic_bandit_dataset\"\n@@ -158,22 +166,30 @@ class 
SyntheticBanditDataset(BaseBanditDataset):\nRewardType.CONTINUOUS,\n]:\nraise ValueError(\n- f\"reward_type must be either '{RewardType.BINARY.value}' or '{RewardType.CONTINUOUS.value}', but {self.reward_type} is given.'\"\n+ f\"`reward_type` must be either '{RewardType.BINARY.value}' or '{RewardType.CONTINUOUS.value}',\"\n+ f\"but {self.reward_type} is given.'\"\n)\n+ check_scalar(self.beta, \"beta\", (int, float))\ncheck_scalar(self.reward_std, \"reward_std\", (int, float), min_val=0)\n- check_scalar(self.tau, \"tau\", (int, float), min_val=0)\nif self.random_state is None:\nraise ValueError(\"`random_state` must be given\")\nself.random_ = check_random_state(self.random_state)\nif self.reward_function is None:\nself.expected_reward = self.sample_contextfree_expected_reward()\n- if self.behavior_policy_function is None:\n- self.behavior_policy = np.ones(self.n_actions) / self.n_actions\nif RewardType(self.reward_type) == RewardType.CONTINUOUS:\nself.reward_min = 0\nself.reward_max = 1e10\n- # one-hot encoding representations characterizing each action\n+ # one-hot encoding representations characterizing actions.\n+ if self.action_context is None:\nself.action_context = np.eye(self.n_actions, dtype=int)\n+ else:\n+ check_array(\n+ array=self.action_context, name=\"action_context\", expected_dim=2\n+ )\n+ if self.action_context.shape[0] != self.n_actions:\n+ raise ValueError(\n+ \"Expected `action_context.shape[0] == n_actions`, but found it False.'\"\n+ )\n@property\ndef len_list(self) -> int:\n@@ -229,10 +245,10 @@ class SyntheticBanditDataset(BaseBanditDataset):\nParameters\n-----------\ncontext: array-like, shape (n_rounds, dim_context)\n- Context vectors characterizing each round (such as user information).\n+ Context vectors characterizing each data (such as user information).\naction: array-like, shape (n_rounds,)\n- Selected actions to the contexts.\n+ Actions chosen for each context.\nReturns\n---------\n@@ -269,26 +285,8 @@ class SyntheticBanditDataset(BaseBanditDataset):\n\"\"\"\ncheck_scalar(n_rounds, \"n_rounds\", int, min_val=1)\ncontext = self.random_.normal(size=(n_rounds, self.dim_context))\n- # sample actions for each round based on the behavior policy\n- if self.behavior_policy_function is None:\n- behavior_policy_ = np.tile(self.behavior_policy, (n_rounds, 1))\n- behavior_policy_ = softmax(behavior_policy_ / self.tau)\n- action = self.random_.choice(\n- np.arange(self.n_actions), p=self.behavior_policy, size=n_rounds\n- )\n- else:\n- behavior_policy_ = self.behavior_policy_function(\n- context=context,\n- action_context=self.action_context,\n- random_state=self.random_state,\n- )\n- behavior_policy_ = softmax(behavior_policy_ / self.tau)\n- action = sample_action_fast(\n- behavior_policy_, random_state=self.random_state\n- )\n- pscore = behavior_policy_[np.arange(n_rounds), action]\n- # sample reward based on the context and action\n+ # calc expected reward given context and action\nexpected_reward_ = self.calc_expected_reward(context)\nif RewardType(self.reward_type) == RewardType.CONTINUOUS:\n# correct expected_reward_, as we use truncated normal distribution here\n@@ -298,6 +296,21 @@ class SyntheticBanditDataset(BaseBanditDataset):\nexpected_reward_ = truncnorm.stats(\na=a, b=b, loc=mean, scale=self.reward_std, moments=\"m\"\n)\n+\n+ # sample actions for each round based on the behavior policy\n+ if self.behavior_policy_function is None:\n+ pi_b = softmax(self.beta * expected_reward_)\n+ else:\n+ pi_b_logits = self.behavior_policy_function(\n+ context=context,\n+ 
action_context=self.action_context,\n+ random_state=self.random_state,\n+ )\n+ pi_b = softmax(self.beta * pi_b_logits)\n+ action = sample_action_fast(pi_b, random_state=self.random_state)\n+ pscore = pi_b[np.arange(n_rounds), action]\n+\n+ # sample reward based on the context and action\nreward = self.sample_reward_given_expected_reward(expected_reward_, action)\nreturn dict(\n@@ -309,6 +322,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nposition=None, # position effect is not considered in synthetic data\nreward=reward,\nexpected_reward=expected_reward_,\n+ pi_b=pi_b[:, :, np.newaxis],\npscore=pscore,\n)\n@@ -349,17 +363,22 @@ class SyntheticBanditDataset(BaseBanditDataset):\ndef logistic_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ degree: int = 3,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Logistic mean reward function for synthetic bandit datasets.\n+ \"\"\"Logistic mean reward function.\nParameters\n-----------\ncontext: array-like, shape (n_rounds, dim_context)\n- Context vectors characterizing each round (such as user information).\n+ Context vectors characterizing each data (such as user information).\naction_context: array-like, shape (n_actions, dim_action_context)\n- Vector representation for each action.\n+ Vector representation of actions.\n+\n+ degree: int, default=3\n+ Specifies the maximal degree of the polynomial feature transformations\n+ applied to both `context` and `action_context`.\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -367,19 +386,32 @@ def logistic_reward_function(\nReturns\n---------\nexpected_reward: array-like, shape (n_rounds, n_actions)\n- Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\n+ check_scalar(degree, \"degree\", int, min_val=1)\ncheck_array(array=context, name=\"context\", expected_dim=2)\ncheck_array(array=action_context, name=\"action_context\", expected_dim=2)\n+ poly = PolynomialFeatures(degree=degree)\n+ context_ = poly.fit_transform(context)\n+ action_context_ = poly.fit_transform(action_context)\n+ datasize, context_dim = context_.shape\n+ n_actions, action_context_dim = action_context_.shape\n+\nrandom_ = check_random_state(random_state)\n- logits = np.zeros((context.shape[0], action_context.shape[0]))\n- # each arm has different coefficient vectors\n- coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n- action_coef_ = random_.uniform(size=action_context.shape[1])\n- for d in np.arange(action_context.shape[0]):\n- logits[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n+ context_coef_ = random_.uniform(-1, 1, size=context_dim)\n+ action_coef_ = random_.uniform(-1, 1, size=action_context_dim)\n+ context_action_coef_ = random_.uniform(\n+ -1, 1, size=(context_dim, action_context_dim)\n+ )\n+\n+ context_logits = np.tile(context_ @ context_coef_, (n_actions, 1)).T\n+ action_logits = np.tile(action_coef_ @ action_context_.T, (datasize, 1))\n+ context_action_logits = context_ @ context_action_coef_ @ action_context_.T\n+ logits = context_logits + action_logits + context_action_logits\n+ logits = degree * (logits - logits.mean()) / logits.std()\nreturn sigmoid(logits)\n@@ -387,73 +419,105 @@ def logistic_reward_function(\ndef linear_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ degree: int = 
3,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Linear mean reward function for synthetic bandit datasets.\n+ \"\"\"Linear mean reward function.\nParameters\n-----------\ncontext: array-like, shape (n_rounds, dim_context)\n- Context vectors characterizing each round (such as user information).\n+ Context vectors characterizing each data (such as user information).\naction_context: array-like, shape (n_actions, dim_action_context)\n- Vector representation for each action.\n+ Vector representation of actions.\n+\n+ degree: int, default=3\n+ Specifies the maximal degree of the polynomial feature transformations\n+ applied to both `context` and `action_context`.\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\nReturns\n---------\n- expected_reward: array-like, shape (n_rounds, n_actions)\n+ expected_rewards: array-like, shape (n_rounds, n_actions)\nExpected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\n+ check_scalar(degree, \"degree\", int, min_val=1)\ncheck_array(array=context, name=\"context\", expected_dim=2)\ncheck_array(array=action_context, name=\"action_context\", expected_dim=2)\n+ poly = PolynomialFeatures(degree=degree)\n+ context_ = poly.fit_transform(context)\n+ action_context_ = poly.fit_transform(action_context)\n+ datasize, context_dim = context_.shape\n+ n_actions, action_context_dim = action_context_.shape\n+\nrandom_ = check_random_state(random_state)\n- expected_reward = np.zeros((context.shape[0], action_context.shape[0]))\n- # each arm has different coefficient vectors\n- coef_ = random_.uniform(size=(action_context.shape[0], context.shape[1]))\n- action_coef_ = random_.uniform(size=action_context.shape[1])\n- for d in np.arange(action_context.shape[0]):\n- expected_reward[:, d] = context @ coef_[d] + action_context[d] @ action_coef_\n+ context_coef_ = random_.uniform(-1, 1, size=context_dim)\n+ action_coef_ = random_.uniform(-1, 1, size=action_context_dim)\n+ context_action_coef_ = random_.uniform(\n+ -1, 1, size=(context_dim, action_context_dim)\n+ )\n- return expected_reward\n+ context_values = np.tile(context_ @ context_coef_, (n_actions, 1)).T\n+ action_values = np.tile(action_coef_ @ action_context_.T, (datasize, 1))\n+ context_action_values = context_ @ context_action_coef_ @ action_context_.T\n+ expected_rewards = context_values + action_values + context_action_values\n+ expected_rewards = (\n+ degree * (expected_rewards - expected_rewards.mean()) / expected_rewards.std()\n+ )\n+\n+ return expected_rewards\ndef linear_behavior_policy(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ degree: int = 3,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Linear contextual behavior policy for synthetic bandit datasets.\n+ \"\"\"Linear contextual behavior policy.\nParameters\n-----------\ncontext: array-like, shape (n_rounds, dim_context)\n- Context vectors characterizing each round (such as user information).\n+ Context vectors characterizing each data (such as user information).\naction_context: array-like, shape (n_actions, dim_action_context)\n- Vector representation for each action.\n+ Vector representation of actions.\n+\n+ degree: int, default=3\n+ Specifies the maximal degree of the polynomial feature transformations\n+ applied to both `context` and `action_context`.\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\nReturns\n---------\n- behavior_policy: array-like, shape (n_rounds, n_actions)\n- Logit 
values given context (:math:`x`), i.e., :math:`\\\\pi: \\\\mathcal{X} \\\\rightarrow \\\\Delta(\\\\mathcal{A})`.\n+ pi_b_logits: array-like, shape (n_rounds, n_actions)\n+ Logit values given context (:math:`x`).\n+ The softmax function will be applied to transform it to action choice probabilities.\n\"\"\"\n+ check_scalar(degree, \"degree\", int, min_val=1)\ncheck_array(array=context, name=\"context\", expected_dim=2)\ncheck_array(array=action_context, name=\"action_context\", expected_dim=2)\n+ poly = PolynomialFeatures(degree=degree)\n+ context_ = poly.fit_transform(context)\n+ action_context_ = poly.fit_transform(action_context)\n+ context_dim = context_.shape[1]\n+ action_context_dim = action_context_.shape[1]\n+\nrandom_ = check_random_state(random_state)\n- logits = np.zeros((context.shape[0], action_context.shape[0]))\n- coef_ = random_.uniform(size=context.shape[1])\n- action_coef_ = random_.uniform(size=action_context.shape[1])\n- for d in np.arange(action_context.shape[0]):\n- logits[:, d] = context @ coef_ + action_context[d] @ action_coef_\n+ coef = random_.uniform(size=(context_dim, action_context_dim))\n+ action_coef = random_.uniform(size=action_context_dim)\n+\n+ pi_b_logits = context_ @ coef @ action_context_.T + action_coef @ action_context_.T\n+ pi_b_logits = degree * (pi_b_logits - pi_b_logits.mean()) / pi_b_logits.std()\n- return logits\n+ return pi_b_logits\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | modify reward and behavior policy functions |
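As the updated docstring in this commit notes, when `behavior_policy_function` is None the behavior policy is now the softmax of the expected reward scaled by the inverse temperature `beta`, so a large `beta` gives a near-deterministic (near-optimal) policy and a small `beta` a near-uniform one. The sketch below illustrates that construction on toy data; the local `softmax` helper is a stand-in for `obp.utils.softmax`, and the random `expected_reward_` is an assumption made only for this example.

```python
import numpy as np

def softmax(x: np.ndarray) -> np.ndarray:
    b = np.max(x, axis=1, keepdims=True)  # subtract row max for numerical stability
    numerator = np.exp(x - b)
    return numerator / numerator.sum(axis=1, keepdims=True)

rng = np.random.RandomState(12345)
n_rounds, n_actions, beta = 4, 3, 1.0
expected_reward_ = rng.uniform(size=(n_rounds, n_actions))  # q(x, a), toy values

pi_b = softmax(beta * expected_reward_)                     # behavior policy
action = np.array([rng.choice(n_actions, p=pi_b[i]) for i in range(n_rounds)])
pscore = pi_b[np.arange(n_rounds), action]                  # pi_b(a_i | x_i)
```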
641,014 | 12.11.2021 10:02:19 | 18,000 | 99c0d632a72b8168a8823e9674ec51bbe12462f3 | define base reward and behavior policy functions | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -4,7 +4,10 @@ from obp.dataset.multiclass import MultiClassToBanditReduction\nfrom obp.dataset.real import OpenBanditDataset\nfrom obp.dataset.synthetic import linear_behavior_policy\nfrom obp.dataset.synthetic import linear_reward_function\n+from obp.dataset.synthetic import logistic_polynomial_reward_function\nfrom obp.dataset.synthetic import logistic_reward_function\n+from obp.dataset.synthetic import polynomial_behavior_policy\n+from obp.dataset.synthetic import polynomial_reward_function\nfrom obp.dataset.synthetic import SyntheticBanditDataset\nfrom obp.dataset.synthetic_continuous import linear_behavior_policy_continuous\nfrom obp.dataset.synthetic_continuous import linear_reward_funcion_continuous\n@@ -24,8 +27,11 @@ __all__ = [\n\"OpenBanditDataset\",\n\"SyntheticBanditDataset\",\n\"logistic_reward_function\",\n+ \"logistic_polynomial_reward_function\",\n\"linear_reward_function\",\n+ \"polynomial_reward_function\",\n\"linear_behavior_policy\",\n+ \"polynomial_behavior_policy\",\n\"MultiClassToBanditReduction\",\n\"SyntheticContinuousBanditDataset\",\n\"linear_reward_funcion_continuous\",\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -363,10 +363,9 @@ class SyntheticBanditDataset(BaseBanditDataset):\ndef logistic_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- degree: int = 3,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Logistic mean reward function.\n+ \"\"\"Logistic mean reward function for binary rewards.\nParameters\n-----------\n@@ -376,10 +375,6 @@ def logistic_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n- degree: int, default=3\n- Specifies the maximal degree of the polynomial feature transformations\n- applied to both `context` and `action_context`.\n-\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -390,39 +385,159 @@ def logistic_reward_function(\ni.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\n- check_scalar(degree, \"degree\", int, min_val=1)\n- check_array(array=context, name=\"context\", expected_dim=2)\n- check_array(array=action_context, name=\"action_context\", expected_dim=2)\n+ logits = _base_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=1,\n+ random_state=random_state,\n+ )\n- poly = PolynomialFeatures(degree=degree)\n- context_ = poly.fit_transform(context)\n- action_context_ = poly.fit_transform(action_context)\n- datasize, context_dim = context_.shape\n- n_actions, action_context_dim = action_context_.shape\n+ return sigmoid(logits)\n- random_ = check_random_state(random_state)\n- context_coef_ = random_.uniform(-1, 1, size=context_dim)\n- action_coef_ = random_.uniform(-1, 1, size=action_context_dim)\n- context_action_coef_ = random_.uniform(\n- -1, 1, size=(context_dim, action_context_dim)\n- )\n- context_logits = np.tile(context_ @ context_coef_, (n_actions, 1)).T\n- action_logits = np.tile(action_coef_ @ action_context_.T, (datasize, 1))\n- context_action_logits = context_ @ context_action_coef_ @ action_context_.T\n- logits = context_logits + action_logits + context_action_logits\n- logits = degree * (logits - logits.mean()) / logits.std()\n+def logistic_polynomial_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Logistic mean reward function for binary rewards.\n+\n+ Note\n+ ------\n+ Polynomial and interaction features will be used to calculate the expected rewards.\n+ Feature transformation is based on `sklearn.preprocessing.PolynomialFeatures(degree=3)`\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ logits = _base_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=3,\n+ random_state=random_state,\n+ )\nreturn sigmoid(logits)\ndef linear_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Linear mean reward function for continuous rewards.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data 
(such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_rewards: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ return _base_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=1,\n+ random_state=random_state,\n+ )\n+\n+\n+def polynomial_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Polynomial mean reward function for continuous rewards.\n+\n+ Note\n+ ------\n+ Polynomial and interaction features will be used to calculate the expected rewards.\n+ Feature transformation is based on `sklearn.preprocessing.PolynomialFeatures(degree=3)`\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_rewards: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ return _base_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=3,\n+ random_state=random_state,\n+ )\n+\n+\n+def _base_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ndegree: int = 3,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Linear mean reward function.\n+ \"\"\"Base function to define mean reward functions.\n+\n+ Note\n+ ------\n+ Given context :math:`x` and action_context :math:`a`, this function is used to define\n+ mean reward function :math:`q(x,a) = \\\\mathbb{E}[r|x,a]` as follows.\n+\n+ .. 
math::\n+\n+ q(x,a) := \\\\tilde{x}^T M_{X,A} \\\\tilde{a} + \\\\theta_x^T \\\\tilde{x} + \\\\theta_a^T \\\\tilde{a},\n+\n+ where :math:`x` is a original context vector,\n+ and :math:`a` is a original action_context vector representing actions.\n+ Polynomial transformation is applied to original context and action vectors,\n+ producing :math:`x \\\\in \\\\mathbb{R}^{d_X}` and :math:`\\\\tilde{a} \\\\in \\\\mathbb{R}^{d_A}`.\n+ :math:`M_{X,A} \\\\mathbb{R}^{d_X \\\\times d_A}`, :math:`\\\\theta_x \\\\in \\\\mathbb{R}^{d_X}`,\n+ and :math:`\\\\theta_a \\\\in \\\\mathbb{R}^{d_A}` are parameter matrix and vectors,\n+ all sampled from the uniform distribution.\n+ The logistic function will be applied to :math:`q(x,a)` in logistic reward functions\n+ to adjust the range of the function output.\n+\n+ Currently, this function is used to define\n+ `obp.dataset.linear_reward function` (degree=1),\n+ `obp.dataset.polynomial_reward function` (degree=3),\n+ `obp.dataset.logistic_reward function` (degree=1),\n+ and `obp.dataset.logistic_polynomial_reward_function` (degree=3).\nParameters\n-----------\n@@ -442,7 +557,8 @@ def linear_reward_function(\nReturns\n---------\nexpected_rewards: array-like, shape (n_rounds, n_actions)\n- Expected reward given context (:math:`x`) and action (:math:`a`), i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n\"\"\"\ncheck_scalar(degree, \"degree\", int, min_val=1)\n@@ -474,12 +590,105 @@ def linear_reward_function(\ndef linear_behavior_policy(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Linear behavior policy function.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ pi_b_logits: array-like, shape (n_rounds, n_actions)\n+ Logit values given context (:math:`x`).\n+ The softmax function will be applied to transform it to action choice probabilities.\n+\n+ \"\"\"\n+ return _base_behavior_policy_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=1,\n+ random_state=random_state,\n+ )\n+\n+\n+def polynomial_behavior_policy(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Polynomial behavior policy function.\n+\n+ Note\n+ ------\n+ Polynomial and interaction features will be used to calculate the expected rewards.\n+ Feature transformation is based on `sklearn.preprocessing.PolynomialFeatures(degree=3)`\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ pi_b_logits: array-like, shape (n_rounds, n_actions)\n+ Logit values given context (:math:`x`).\n+ The softmax function will be applied to transform it to action choice probabilities.\n+\n+ \"\"\"\n+ return _base_behavior_policy_function(\n+ context=context,\n+ 
action_context=action_context,\n+ degree=3,\n+ random_state=random_state,\n+ )\n+\n+\n+def _base_behavior_policy_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ndegree: int = 3,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Linear contextual behavior policy.\n+ \"\"\"Base function to define behavior policy functions.\n+\n+ Note\n+ ------\n+ Given context :math:`x` and action_context :math:`x_a`, this function generates\n+ logit values for defining a behavior policy as follows.\n+\n+ .. math::\n+\n+ f_b(x,a) := \\\\tilde{x}^T M_{X,A} \\\\tilde{a} + \\\\theta_a^T \\\\tilde{a},\n+\n+ where :math:`x` is a original context vector,\n+ and :math:`a` is a original action_context vector representing actions.\n+ Polynomial transformation is applied to original context and action vectors,\n+ producing :math:`x \\\\in \\\\mathbb{R}^{d_X}` and :math:`\\\\tilde{a} \\\\in \\\\mathbb{R}^{d_A}`.\n+ :math:`M_{X,A} \\\\mathbb{R}^{d_X \\\\times d_A}` and :math:`\\\\theta_a \\\\in \\\\mathbb{R}^{d_A}` are\n+ parameter matrix and vector, each sampled from the uniform distribution.\n+ The softmax function will be applied to :math:`f_b(x,\\\\cdot)` in `obp.dataset.SyntheticDataset`\n+ to generate distribution over actions (behavior policy).\n+\n+ Currently, this function is used to define\n+ `obp.dataset.linear_behavior_policy` (degree=1)\n+ and `obp.dataset.polynomial_behavior_policy` (degree=3).\nParameters\n-----------\n@@ -514,10 +723,11 @@ def linear_behavior_policy(\naction_context_dim = action_context_.shape[1]\nrandom_ = check_random_state(random_state)\n- coef = random_.uniform(size=(context_dim, action_context_dim))\naction_coef = random_.uniform(size=action_context_dim)\n+ context_action_coef = random_.uniform(size=(context_dim, action_context_dim))\n- pi_b_logits = context_ @ coef @ action_context_.T + action_coef @ action_context_.T\n+ pi_b_logits = context_ @ context_action_coef @ action_context_.T\n+ pi_b_logits += action_coef @ action_context_.T\npi_b_logits = degree * (pi_b_logits - pi_b_logits.mean()) / pi_b_logits.std()\nreturn pi_b_logits\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | define base reward and behavior policy functions |
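The `_base_reward_function` introduced above parameterizes q(x,a) with polynomial features of the context and action vectors plus an interaction term. The following is a rough sketch of that parameterization under assumed toy shapes; the coefficient names mirror the diff, but the final standardization (`degree * (q - q.mean()) / q.std()`) is omitted for brevity, so this is illustrative rather than the library code.

```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.utils import check_random_state

degree = 3
random_ = check_random_state(12345)
context = random_.normal(size=(10, 5))  # x: 10 rounds, 5-dim context
action_context = np.eye(4)              # one-hot action representation

poly = PolynomialFeatures(degree=degree)
context_ = poly.fit_transform(context)                 # \tilde{x}
action_context_ = poly.fit_transform(action_context)   # \tilde{a}

context_coef_ = random_.uniform(-1, 1, size=context_.shape[1])        # theta_x
action_coef_ = random_.uniform(-1, 1, size=action_context_.shape[1])  # theta_a
context_action_coef_ = random_.uniform(                               # M_{X,A}
    -1, 1, size=(context_.shape[1], action_context_.shape[1])
)

# q(x,a) = \tilde{x}^T M_{X,A} \tilde{a} + theta_x^T \tilde{x} + theta_a^T \tilde{a}
q = (
    context_ @ context_action_coef_ @ action_context_.T
    + (context_ @ context_coef_)[:, np.newaxis]
    + (action_context_ @ action_coef_)[np.newaxis, :]
)
print(q.shape)  # (10, 4): one expected reward per (round, action) pair
```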
641,014 | 13.11.2021 03:30:28 | 18,000 | a499ec2f89046037657b79e78a5fd993edccd66c | add sparse reward functions | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -6,8 +6,10 @@ from obp.dataset.synthetic import linear_behavior_policy\nfrom obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import logistic_polynomial_reward_function\nfrom obp.dataset.synthetic import logistic_reward_function\n+from obp.dataset.synthetic import logistic_sparse_reward_function\nfrom obp.dataset.synthetic import polynomial_behavior_policy\nfrom obp.dataset.synthetic import polynomial_reward_function\n+from obp.dataset.synthetic import sparse_reward_function\nfrom obp.dataset.synthetic import SyntheticBanditDataset\nfrom obp.dataset.synthetic_continuous import linear_behavior_policy_continuous\nfrom obp.dataset.synthetic_continuous import linear_reward_funcion_continuous\n@@ -28,8 +30,10 @@ __all__ = [\n\"SyntheticBanditDataset\",\n\"logistic_reward_function\",\n\"logistic_polynomial_reward_function\",\n+ \"logistic_sparse_reward_function\",\n\"linear_reward_function\",\n\"polynomial_reward_function\",\n+ \"sparse_reward_function\",\n\"linear_behavior_policy\",\n\"polynomial_behavior_policy\",\n\"MultiClassToBanditReduction\",\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -402,7 +402,7 @@ def logistic_polynomial_reward_function(\naction_context: np.ndarray,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n- \"\"\"Logistic mean reward function for binary rewards.\n+ \"\"\"Logistic mean reward function for binary rewards with polynomial feature transformations.\nNote\n------\n@@ -437,6 +437,48 @@ def logistic_polynomial_reward_function(\nreturn sigmoid(logits)\n+def logistic_sparse_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Logistic mean reward function for binary rewards with small effective feature dimension.\n+\n+ Note\n+ ------\n+ Polynomial and interaction features will be used to calculate the expected rewards.\n+ `sklearn.preprocessing.PolynomialFeatures(degree=4)` is applied to generate high-dimensional feature vector.\n+ After that, some dimensions will be dropped as irrelevant dimensions, producing sparse feature vector.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_reward: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ logits = _base_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=4,\n+ effective_dim_ratio=0.3,\n+ random_state=random_state,\n+ )\n+\n+ return sigmoid(logits)\n+\n+\ndef linear_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n@@ -480,7 +522,7 @@ def polynomial_reward_function(\nNote\n------\nPolynomial and interaction features will be used to calculate the expected rewards.\n- Feature transformation is based on `sklearn.preprocessing.PolynomialFeatures(degree=3)`\n+ Feature transformation is based on `sklearn.preprocessing.PolynomialFeatures(degree=3)`.\nParameters\n-----------\n@@ -508,10 +550,51 @@ def polynomial_reward_function(\n)\n+def sparse_reward_function(\n+ context: np.ndarray,\n+ action_context: np.ndarray,\n+ random_state: Optional[int] = None,\n+) -> np.ndarray:\n+ \"\"\"Sparse mean reward function for continuous rewards.\n+\n+ Note\n+ ------\n+ Polynomial and interaction features will be used to calculate the expected rewards.\n+ `sklearn.preprocessing.PolynomialFeatures(degree=4)` is applied to generate high-dimensional feature vector.\n+ After that, some dimensions will be dropped as irrelevant dimensions, producing sparse feature vector.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors characterizing each data (such as user information).\n+\n+ action_context: array-like, shape (n_actions, dim_action_context)\n+ Vector representation of actions.\n+\n+ random_state: int, default=None\n+ Controls the random seed in sampling dataset.\n+\n+ Returns\n+ ---------\n+ expected_rewards: array-like, shape (n_rounds, n_actions)\n+ Expected reward given context (:math:`x`) and action (:math:`a`),\n+ i.e., :math:`q(x,a):=\\\\mathbb{E}[r|x,a]`.\n+\n+ \"\"\"\n+ return _base_reward_function(\n+ context=context,\n+ action_context=action_context,\n+ degree=4,\n+ effective_dim_ratio=0.3,\n+ random_state=random_state,\n+ )\n+\n+\ndef 
_base_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ndegree: int = 3,\n+ effective_dim_ratio: float = 1.0,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Base function to define mean reward functions.\n@@ -528,7 +611,8 @@ def _base_reward_function(\nwhere :math:`x` is a original context vector,\nand :math:`a` is a original action_context vector representing actions.\nPolynomial transformation is applied to original context and action vectors,\n- producing :math:`x \\\\in \\\\mathbb{R}^{d_X}` and :math:`\\\\tilde{a} \\\\in \\\\mathbb{R}^{d_A}`.\n+ producing :math:`\\\\tilde{x} \\\\in \\\\mathbb{R}^{d_X}` and :math:`\\\\tilde{a} \\\\in \\\\mathbb{R}^{d_A}`.\n+ Moreover, some dimensions of context and action_context might be randomly dropped according to `effective_dim_ratio`.\n:math:`M_{X,A} \\\\mathbb{R}^{d_X \\\\times d_A}`, :math:`\\\\theta_x \\\\in \\\\mathbb{R}^{d_X}`,\nand :math:`\\\\theta_a \\\\in \\\\mathbb{R}^{d_A}` are parameter matrix and vectors,\nall sampled from the uniform distribution.\n@@ -538,8 +622,10 @@ def _base_reward_function(\nCurrently, this function is used to define\n`obp.dataset.linear_reward function` (degree=1),\n`obp.dataset.polynomial_reward function` (degree=3),\n+ `obp.dataset.sparse_reward function` (degree=4, effective_dim_ratio=0.1),\n`obp.dataset.logistic_reward function` (degree=1),\n- and `obp.dataset.logistic_polynomial_reward_function` (degree=3).\n+ `obp.dataset.logistic_polynomial_reward_function` (degree=3),\n+ and `obp.dataset.logistic_sparse_reward_function` (degree=4, effective_dim_ratio=0.1).\nParameters\n-----------\n@@ -553,6 +639,11 @@ def _base_reward_function(\nSpecifies the maximal degree of the polynomial feature transformations\napplied to both `context` and `action_context`.\n+ effective_dim_ratio: int, default=1.0\n+ Propotion of context dimensions relevant to the expected rewards.\n+ Specifically, after the polynomial feature transformation is applied to the original context vectors,\n+ only `dim_context * effective_dim_ratio` number of relevant dimensions will be used to generate expected rewards.\n+\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -564,25 +655,52 @@ def _base_reward_function(\n\"\"\"\ncheck_scalar(degree, \"degree\", int, min_val=1)\n+ check_scalar(\n+ effective_dim_ratio, \"effective_dim_ratio\", float, min_val=0, max_val=1\n+ )\ncheck_array(array=context, name=\"context\", expected_dim=2)\ncheck_array(array=action_context, name=\"action_context\", expected_dim=2)\npoly = PolynomialFeatures(degree=degree)\ncontext_ = poly.fit_transform(context)\naction_context_ = poly.fit_transform(action_context)\n- datasize, context_dim = context_.shape\n- n_actions, action_context_dim = action_context_.shape\n-\n+ datasize, dim_context = context_.shape\n+ n_actions, dim_action_context = action_context_.shape\nrandom_ = check_random_state(random_state)\n- context_coef_ = random_.uniform(-1, 1, size=context_dim)\n- action_coef_ = random_.uniform(-1, 1, size=action_context_dim)\n+\n+ if effective_dim_ratio < 1.0:\n+ effective_dim_context = np.maximum(\n+ np.int32(dim_context * effective_dim_ratio), 1\n+ )\n+ effective_dim_action_context = np.maximum(\n+ np.int32(dim_action_context * effective_dim_ratio), 1\n+ )\n+ effective_context_ = context_[\n+ :, random_.choice(dim_context, effective_dim_context, replace=False)\n+ ]\n+ effective_action_context_ = action_context_[\n+ :,\n+ random_.choice(\n+ dim_action_context, effective_dim_action_context, 
replace=False\n+ ),\n+ ]\n+ else:\n+ effective_dim_context = dim_context\n+ effective_dim_action_context = dim_action_context\n+ effective_context_ = context_\n+ effective_action_context_ = action_context_\n+\n+ context_coef_ = random_.uniform(-1, 1, size=effective_dim_context)\n+ action_coef_ = random_.uniform(-1, 1, size=effective_dim_action_context)\ncontext_action_coef_ = random_.uniform(\n- -1, 1, size=(context_dim, action_context_dim)\n+ -1, 1, size=(effective_dim_context, effective_dim_action_context)\n)\n- context_values = np.tile(context_ @ context_coef_, (n_actions, 1)).T\n- action_values = np.tile(action_coef_ @ action_context_.T, (datasize, 1))\n- context_action_values = context_ @ context_action_coef_ @ action_context_.T\n+ context_values = np.tile(effective_context_ @ context_coef_, (n_actions, 1)).T\n+ action_values = np.tile(action_coef_ @ effective_action_context_.T, (datasize, 1))\n+ context_action_values = (\n+ effective_context_ @ context_action_coef_ @ effective_action_context_.T\n+ )\nexpected_rewards = context_values + action_values + context_action_values\nexpected_rewards = (\ndegree * (expected_rewards - expected_rewards.mean()) / expected_rewards.std()\n@@ -682,7 +800,7 @@ def _base_behavior_policy_function(\nwhere :math:`x` is a original context vector,\nand :math:`a` is a original action_context vector representing actions.\nPolynomial transformation is applied to original context and action vectors,\n- producing :math:`x \\\\in \\\\mathbb{R}^{d_X}` and :math:`\\\\tilde{a} \\\\in \\\\mathbb{R}^{d_A}`.\n+ producing :math:`\\\\tilde{x} \\\\in \\\\mathbb{R}^{d_X}` and :math:`\\\\tilde{a} \\\\in \\\\mathbb{R}^{d_A}`.\n:math:`M_{X,A} \\\\mathbb{R}^{d_X \\\\times d_A}` and :math:`\\\\theta_a \\\\in \\\\mathbb{R}^{d_A}` are\nparameter matrix and vector, each sampled from the uniform distribution.\nThe softmax function will be applied to :math:`f_b(x,\\\\cdot)` in `obp.dataset.SyntheticDataset`\n@@ -721,12 +839,12 @@ def _base_behavior_policy_function(\npoly = PolynomialFeatures(degree=degree)\ncontext_ = poly.fit_transform(context)\naction_context_ = poly.fit_transform(action_context)\n- context_dim = context_.shape[1]\n- action_context_dim = action_context_.shape[1]\n+ dim_context = context_.shape[1]\n+ dim_action_context = action_context_.shape[1]\nrandom_ = check_random_state(random_state)\n- action_coef = random_.uniform(size=action_context_dim)\n- context_action_coef = random_.uniform(size=(context_dim, action_context_dim))\n+ action_coef = random_.uniform(size=dim_action_context)\n+ context_action_coef = random_.uniform(size=(dim_context, dim_action_context))\npi_b_logits = context_ @ context_action_coef @ action_context_.T\npi_b_logits += action_coef @ action_context_.T\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -6,8 +6,10 @@ from obp.dataset.synthetic import linear_behavior_policy\nfrom obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import logistic_polynomial_reward_function\nfrom obp.dataset.synthetic import logistic_reward_function\n+from obp.dataset.synthetic import logistic_sparse_reward_function\nfrom obp.dataset.synthetic import polynomial_behavior_policy\nfrom obp.dataset.synthetic import polynomial_reward_function\n+from obp.dataset.synthetic import sparse_reward_function\nfrom obp.utils import softmax\n@@ -251,6 +253,7 @@ def test_synthetic_logistic_reward_function():\nfor logistic_reward_function_ in [\nlogistic_reward_function,\nlogistic_polynomial_reward_function,\n+ logistic_sparse_reward_function,\n]:\n# context\nwith pytest.raises(ValueError):\n@@ -294,6 +297,7 @@ def test_synthetic_continuous_reward_function():\nfor continuous_reward_function in [\nlinear_reward_function,\npolynomial_reward_function,\n+ sparse_reward_function,\n]:\n# context\nwith pytest.raises(ValueError):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -60,7 +60,7 @@ base_model_dict = dict(\noffline_experiment_configurations = [\n(\n- 800,\n+ 600,\n10,\n5,\n\"logistic_regression\",\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add sparse reward functions |
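The sparse variants added in this commit rely on the new `effective_dim_ratio` argument: after the degree-4 polynomial transformation, only a random subset of feature dimensions is kept as relevant, which makes the resulting reward function sparse in the full feature space. Below is an illustrative sketch of that dimension-dropping step on toy data; it mirrors only the context side of the logic in `_base_reward_function`.

```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.utils import check_random_state

effective_dim_ratio = 0.3
random_ = check_random_state(12345)

context = random_.normal(size=(10, 5))
context_ = PolynomialFeatures(degree=4).fit_transform(context)
dim_context = context_.shape[1]

# keep at least one dimension even when the ratio is very small
effective_dim_context = np.maximum(np.int32(dim_context * effective_dim_ratio), 1)
kept_dims = random_.choice(dim_context, effective_dim_context, replace=False)
effective_context_ = context_[:, kept_dims]

print(dim_context, effective_dim_context)  # 126 37 for these toy shapes
```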
641,006 | 18.11.2021 17:06:54 | -32,400 | 55653a8b6444774705fa88bf6b889b024e1ea5ce | initial commit of b-ope | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -7,6 +7,7 @@ from obp.ope.estimators import ReplayMethod\nfrom obp.ope.estimators import SelfNormalizedDoublyRobust\nfrom obp.ope.estimators import SelfNormalizedInverseProbabilityWeighting\nfrom obp.ope.estimators import SwitchDoublyRobust\n+from obp.ope.estimators import BalancedInverseProbabilityWeighting\nfrom obp.ope.estimators_continuous import (\nKernelizedSelfNormalizedInverseProbabilityWeighting,\n)\n@@ -57,6 +58,7 @@ __all__ = [\n\"SelfNormalizedSlateRewardInteractionIPS\",\n\"SelfNormalizedSlateIndependentIPS\",\n\"SelfNormalizedSlateStandardIPS\",\n+ \"BalancedInverseProbabilityWeighting\",\n\"BaseContinuousOffPolicyEstimator\",\n\"KernelizedInverseProbabilityWeighting\",\n\"KernelizedSelfNormalizedInverseProbabilityWeighting\",\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | initial commit of b-ope |
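The `BalancedInverseProbabilityWeighting` estimator registered above (together with the `ImportanceSampler` class introduced in the following commit) follows Sondhi et al. (2020): instead of relying on known propensities, a classifier is trained to distinguish evaluation-policy actions (label C=1) from logged behavior-policy actions (C=0), and p/(1-p) is used as the importance weight. The snippet below is a hedged toy sketch of that idea, not the library implementation; the uniform action sampling and the `LogisticRegression` model are arbitrary choices made only for illustration.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(12345)
n_rounds, n_actions, dim_context = 500, 3, 4
context = rng.normal(size=(n_rounds, dim_context))
action_b = rng.randint(n_actions, size=n_rounds)  # actions logged under pi_b
action_e = rng.randint(n_actions, size=n_rounds)  # actions sampled from pi_e

def featurize(context: np.ndarray, action: np.ndarray) -> np.ndarray:
    onehot = np.eye(n_actions)[action]            # one-hot action features
    return np.concatenate([context, onehot], axis=1)

X = np.vstack([featurize(context, action_b), featurize(context, action_e)])
y = np.concatenate([np.zeros(n_rounds), np.ones(n_rounds)])  # C = 0 / 1

clf = LogisticRegression(max_iter=1000).fit(X, y)
p = clf.predict_proba(featurize(context, action_b))[:, 1]    # Pr[C=1 | x, a]
balancing_weight = p / (1 - p)                               # ~ pi_e(a|x) / pi_b(a|x)

reward = rng.binomial(1, 0.5, size=n_rounds)                 # toy rewards
balanced_ipw_estimate = np.mean(balancing_weight * reward)
```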
641,006 | 26.11.2021 21:09:52 | -32,400 | dd22feac7350a35f9d96c495e9a5cbb8748911a8 | wip (balanced ope) | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -32,6 +32,7 @@ from obp.ope.meta import OffPolicyEvaluation\nfrom obp.ope.meta_continuous import ContinuousOffPolicyEvaluation\nfrom obp.ope.meta_slate import SlateOffPolicyEvaluation\nfrom obp.ope.regression_model import RegressionModel\n+from obp.ope.classification_model import ImportanceSampler\n__all__ = [\n@@ -59,6 +60,7 @@ __all__ = [\n\"SelfNormalizedSlateIndependentIPS\",\n\"SelfNormalizedSlateStandardIPS\",\n\"BalancedInverseProbabilityWeighting\",\n+ \"ImportanceSampler\",\n\"BaseContinuousOffPolicyEstimator\",\n\"KernelizedInverseProbabilityWeighting\",\n\"KernelizedSelfNormalizedInverseProbabilityWeighting\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/ope/classification_model.py",
"diff": "+# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Regression Model Class for Estimating Mean Reward Functions.\"\"\"\n+from dataclasses import dataclass\n+from typing import Optional\n+\n+import numpy as np\n+from sklearn.base import BaseEstimator\n+from sklearn.base import clone\n+from sklearn.model_selection import KFold\n+from sklearn.utils import check_random_state\n+from sklearn.utils import check_scalar\n+from sklearn.calibration import CalibratedClassifierCV\n+\n+from ..utils import check_bandit_feedback_inputs, sample_action_fast\n+\n+\n+@dataclass\n+class ImportanceSampler(BaseEstimator):\n+ \"\"\"Machine learning model to distinguish between the behavior and evaluation policy (:math:`\\\\Pr[C=1 | x, a]`),\n+ where :math:`C` equals to 1 if the action is sampled by evaluation policy.\n+\n+ Parameters\n+ ------------\n+ base_model: BaseEstimator\n+ A machine learning model used to estimate the mean reward function.\n+\n+ n_actions: int\n+ Number of actions.\n+\n+ len_list: int, default=1\n+ Length of a list of actions recommended in each impression.\n+ When Open Bandit Dataset is used, 3 should be set.\n+\n+ action_context: array-like, shape (n_actions, dim_action_context), default=None\n+ Context vector characterizing action (i.e., vector representation of each action).\n+ If not given, one-hot encoding of the action variable is used as default.\n+\n+ fitting_method: str, default='weighted_loss'\n+ Method to fit the regression model.\n+ Must be one of ['weighted_loss', 'sample', 'raw']. Each method is defined as follows:\n+ - weighted_loss: for each round, n_actions rows are duplicated. For each duplicated row, action features are represented by one-hot encoding of each action. 
Classification models are trained with sample_weight, where sample_weight is the probability that the corresponding action is sampled (action_dist_at_position[:, action_idx]).\n+ - sample: actions are sampled by applying Gumbel-softmax trick to action_dist_at_position, and action features are represented by one-hot encoding of the sampled action.\n+ - raw: action_dist_at_position are directly encoded as action features.\n+\n+ References\n+ -----------\n+ Arjun Sondhi, David Arbour, and Drew Dimmery\n+ \"Balanced Off-Policy Evaluation in General Action Spaces.\", 2020.\n+\n+ \"\"\"\n+\n+ base_model: BaseEstimator\n+ n_actions: int\n+ len_list: int = 1\n+ action_context: Optional[np.ndarray] = None\n+ fitting_method: str = \"weighted_loss\"\n+ fitting_random_state: Optional[int] = None\n+ calibration_cv: int = 2\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize Class.\"\"\"\n+ check_scalar(self.n_actions, \"n_actions\", int, min_val=2)\n+ check_scalar(self.len_list, \"len_list\", int, min_val=1)\n+ if not (\n+ isinstance(self.fitting_method, str)\n+ and self.fitting_method in [\"weighted_loss\", \"sample\", \"raw\"]\n+ ):\n+ raise ValueError(\n+ f\"fitting_method must be one of 'weighted_loss', 'sample', or 'raw', but {self.fitting_method} is given\"\n+ )\n+ if not isinstance(self.base_model, BaseEstimator):\n+ raise ValueError(\n+ \"base_model must be BaseEstimator or a child class of BaseEstimator\"\n+ )\n+\n+ if self.calibration_cv > 0:\n+ self.base_model_list = [\n+ clone(\n+ CalibratedClassifierCV(\n+ base_estimator=self.base_model, cv=self.calibration_cv\n+ ),\n+ )\n+ for _ in np.arange(self.len_list)\n+ ]\n+ else:\n+ self.base_model_list = [\n+ clone(self.base_model) for _ in np.arange(self.len_list)\n+ ]\n+ if self.action_context is None:\n+ self.action_context = np.eye(self.n_actions, dtype=int)\n+\n+ def fit(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ action_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ ) -> None:\n+ \"\"\"Fit the regression model on given logged bandit feedback data.\n+\n+ Parameters\n+ ----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors observed in each round of the logged bandit feedback, i.e., :math:`x_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\pi_b(a_t|x_t)`.\n+ When None is given, behavior policy is assumed to be uniform.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Position of recommendation interface where action was presented in each round of the given logged bandit data.\n+ If None is given, a regression model assumes that there is only one position.\n+ When `len_list` > 1, this position argument has to be set.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ When either of 'iw' or 'mrdr' is used as the 'fitting_method' argument, then `action_dist` must be given.\n+\n+ \"\"\"\n+ # check_bandit_feedback_inputs(\n+ # context=context,\n+ # action=action,\n+ # reward=reward,\n+ # pscore=pscore,\n+ # position=position,\n+ # action_context=self.action_context,\n+ # )\n+ 
n_rounds = context.shape[0]\n+\n+ if position is None or self.len_list == 1:\n+ position = np.zeros_like(action)\n+ else:\n+ if position.max() >= self.len_list:\n+ raise ValueError(\n+ f\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n+ )\n+ if not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):\n+ raise ValueError(\n+ \"when fitting_method is either 'iw' or 'mrdr', action_dist (a 3-dimensional ndarray) must be given\"\n+ )\n+ if action_dist.shape != (n_rounds, self.n_actions, self.len_list):\n+ raise ValueError(\n+ f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list}), but is {action_dist.shape}\"\n+ )\n+ if not np.allclose(action_dist.sum(axis=1), 1):\n+ raise ValueError(\"action_dist must be a probability distribution\")\n+\n+ # If self.fitting_method != \"sample\", `sampled_action` has no information\n+ sampled_action = np.zeros((n_rounds, self.n_actions, self.len_list))\n+ if self.fitting_method == \"sample\":\n+ for position_ in np.arange(self.len_list):\n+ idx = position == position_\n+ sampled_action_at_position = sample_action_fast(\n+ action_dist=action_dist[idx][:, :, position_],\n+ random_state=self.fitting_random_state,\n+ )\n+ sampled_action[\n+ idx,\n+ sampled_action_at_position,\n+ position_,\n+ ] = 1\n+\n+ for position_ in np.arange(self.len_list):\n+ idx = position == position_\n+ action_dist_at_position = action_dist[idx][:, :, position_]\n+ X, y, sample_weight = self._pre_process_for_clf_model(\n+ context=context[idx],\n+ action=action[idx],\n+ action_dist_at_position=action_dist_at_position,\n+ sampled_action_at_position=sampled_action[idx][:, :, position_],\n+ )\n+ if X.shape[0] == 0:\n+ raise ValueError(f\"No training data at position {position_}\")\n+ self.base_model_list[position_].fit(X, y, sample_weight=sample_weight)\n+\n+ def predict(\n+ self,\n+ action: np.ndarray,\n+ context: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ ) -> np.ndarray:\n+ proba_of_evaluation_policy = np.zeros(action.shape[0])\n+ for position_ in np.arange(self.len_list):\n+ idx = position == position_\n+ X, _, _ = self._pre_process_for_clf_model(\n+ context=context[idx],\n+ action=action[idx],\n+ is_prediction=True,\n+ )\n+ proba_of_evaluation_policy[idx] = self.base_model_list[\n+ position_\n+ ].predict_proba(X)[:, 1]\n+ return proba_of_evaluation_policy / (1 - proba_of_evaluation_policy)\n+\n+ def fit_predict(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ action_dist: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ n_folds: int = 1,\n+ random_state: Optional[int] = None,\n+ is_eval_model: bool = False,\n+ ) -> np.ndarray:\n+ \"\"\"Fit the regression model on given logged bandit feedback data and predict the reward function of the same data.\n+\n+ Note\n+ ------\n+ When `n_folds` is larger than 1, then the cross-fitting procedure is applied.\n+ See the reference for the details about the cross-fitting technique.\n+\n+ Parameters\n+ ----------\n+ context: array-like, shape (n_rounds, dim_context)\n+ Context vectors observed in each round of the logged bandit feedback, i.e., :math:`x_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ reward: array-like, shape (n_rounds,)\n+ Observed rewards (or outcome) in each round, i.e., :math:`r_t`.\n+\n+ pscore: array-like, shape (n_rounds,), default=None\n+ Action choice 
probabilities (propensity score) of a behavior policy\n+ in the training logged bandit feedback.\n+ When None is given, the the behavior policy is assumed to be a uniform one.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Position of recommendation interface where action was presented in each round of the given logged bandit data.\n+ If None is given, a regression model assumes that there is only one position.\n+ When `len_list` > 1, this position argument has to be set.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list), default=None\n+ Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ When either of 'iw' or 'mrdr' is used as the 'fitting_method' argument, then `action_dist` must be given.\n+\n+ n_folds: int, default=1\n+ Number of folds in the cross-fitting procedure.\n+ When 1 is given, the regression model is trained on the whole logged bandit feedback data.\n+ Please refer to https://arxiv.org/abs/2002.08536 about the details of the cross-fitting procedure.\n+\n+ random_state: int, default=None\n+ `random_state` affects the ordering of the indices, which controls the randomness of each fold.\n+ See https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html for the details.\n+\n+ Returns\n+ -----------\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)\n+ Expected rewards of new data estimated by the regression model.\n+\n+ \"\"\"\n+ # check_bandit_feedback_inputs(\n+ # context=context,\n+ # action=action,\n+ # reward=reward,\n+ # pscore=pscore,\n+ # position=position,\n+ # action_context=self.action_context,\n+ # )\n+ n_rounds = context.shape[0]\n+\n+ check_scalar(n_folds, \"n_folds\", int, min_val=1)\n+ check_random_state(random_state)\n+\n+ if position is None or self.len_list == 1:\n+ position = np.zeros_like(action)\n+ else:\n+ if position.max() >= self.len_list:\n+ raise ValueError(\n+ f\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n+ )\n+ if not (isinstance(action_dist, np.ndarray) and action_dist.ndim == 3):\n+ raise ValueError(\n+ \"when fitting_method is either 'iw' or 'mrdr', action_dist (a 3-dimensional ndarray) must be given\"\n+ )\n+ if action_dist.shape != (n_rounds, self.n_actions, self.len_list):\n+ raise ValueError(\n+ f\"shape of action_dist must be (n_rounds, n_actions, len_list)=({n_rounds, self.n_actions, self.len_list}), but is {action_dist.shape}\"\n+ )\n+ if not np.allclose(action_dist.sum(axis=1), 1):\n+ raise ValueError(\"action_dist must be a probability distribution\")\n+\n+ if n_folds == 1:\n+ self.fit(\n+ context=context,\n+ action=action,\n+ position=position,\n+ action_dist=action_dist,\n+ )\n+ return self.predict(context=context, action=action, position=position)\n+ else:\n+ balancing_weight = np.zeros(n_rounds)\n+ kf = KFold(n_splits=n_folds, shuffle=True, random_state=random_state)\n+ kf.get_n_splits(context)\n+ if is_eval_model:\n+ self.eval_result = {\"y\": [], \"proba\": [], \"sample_weight\": []}\n+ for train_idx, test_idx in kf.split(context):\n+ self.fit(\n+ context=context[train_idx],\n+ action=action[train_idx],\n+ position=position[train_idx],\n+ action_dist=action_dist[train_idx],\n+ )\n+ balancing_weight[test_idx] = self.predict(\n+ context=context[test_idx],\n+ action=action[test_idx],\n+ position=position[test_idx],\n+ )\n+ if is_eval_model:\n+ sampled_action = np.zeros(\n+ (test_idx.shape[0], self.n_actions, 
self.len_list)\n+ )\n+ if self.fitting_method == \"sample\":\n+ for position_ in np.arange(self.len_list):\n+ idx = position[test_idx] == position_\n+ sampled_action_at_position = sample_action_fast(\n+ action_dist=action_dist[test_idx][idx][:, :, position_],\n+ random_state=self.fitting_random_state,\n+ )\n+ sampled_action[\n+ idx,\n+ sampled_action_at_position,\n+ position_,\n+ ] = 1\n+ for position_ in np.arange(self.len_list):\n+ idx = position[test_idx] == position_\n+ action_dist_at_position = action_dist[test_idx][idx][\n+ :, :, position_\n+ ]\n+ X, y, sample_weight = self._pre_process_for_clf_model(\n+ context=context[test_idx][idx],\n+ action=action[test_idx][idx],\n+ action_dist_at_position=action_dist_at_position,\n+ sampled_action_at_position=sampled_action[idx][:, :, position_],\n+ )\n+ proba_of_evaluation_policy = self.base_model_list[\n+ position_\n+ ].predict_proba(X)[:, 1]\n+ self.eval_result[\"proba\"].append(proba_of_evaluation_policy)\n+ self.eval_result[\"y\"].append(y)\n+ self.eval_result[\"sample_weight\"].append(sample_weight)\n+ return balancing_weight\n+\n+ def _pre_process_for_clf_model(\n+ self,\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ action_dist_at_position: Optional[np.ndarray] = None,\n+ sampled_action_at_position: Optional[np.ndarray] = None,\n+ is_prediction: bool = False,\n+ ) -> np.ndarray:\n+ \"\"\"Preprocess feature vectors to train a regression model.\n+\n+ Note\n+ -----\n+ Please override this method if you want to use another feature enginnering\n+ for training the regression model.\n+\n+ Parameters\n+ -----------\n+ context: array-like, shape (n_rounds,)\n+ Context vectors observed in each round of the logged bandit feedback, i.e., :math:`x_t`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n+\n+ action_context: array-like, shape shape (n_actions, dim_action_context)\n+ Context vector characterizing action (i.e., vector representation of each action).\n+\n+ \"\"\"\n+ behavior_feature = np.c_[context, self.action_context[action]]\n+ if is_prediction:\n+ return behavior_feature, None, None\n+ if self.fitting_method == \"weighted_loss\":\n+ X = np.copy(behavior_feature)\n+ y = np.zeros(X.shape[0], dtype=int)\n+ sample_weight = np.ones(X.shape[0])\n+ for action_idx in np.arange(self.n_actions):\n+ tmp_action = np.ones(context.shape[0], dtype=int) * action_idx\n+ evaluation_feature = np.c_[context, self.action_context[tmp_action]]\n+ X = np.r_[X, evaluation_feature]\n+ y = np.r_[y, np.ones(evaluation_feature.shape[0], dtype=int)]\n+ sample_weight = np.r_[\n+ sample_weight, action_dist_at_position[:, action_idx]\n+ ]\n+ else:\n+ if self.fitting_method == \"raw\":\n+ evaluation_feature = np.c_[context, action_dist_at_position]\n+ elif self.fitting_method == \"sample\":\n+ evaluation_feature = np.c_[context, sampled_action_at_position]\n+ X = np.copy(behavior_feature)\n+ y = np.zeros(X.shape[0], dtype=int)\n+ X = np.r_[X, evaluation_feature]\n+ y = np.r_[y, np.ones(evaluation_feature.shape[0], dtype=int)]\n+ sample_weight = None\n+ return X, y, sample_weight\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -1773,8 +1773,6 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nlen_list: int\nn_actions: int\n- fit_random_state: int\n- base_model: BaseEstimator\nlambda_: float = np.inf\nestimator_name: str = \"b-ipw\"\n@@ -1788,9 +1786,6 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\n)\nif self.lambda_ != self.lambda_:\nraise ValueError(\"lambda_ must not be nan\")\n- self.base_model_list = [\n- clone(self.base_model) for _ in np.arange(self.len_list)\n- ]\ndef fit(\nself,\n@@ -1985,6 +1980,7 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\naction: np.ndarray,\naction_dist: np.ndarray,\ncontext: np.ndarray,\n+ balancing_weight: np.ndarray,\nposition: Optional[np.ndarray] = None,\naction_context: Optional[np.ndarray] = None,\n**kwargs,\n@@ -2020,12 +2016,6 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\ncheck_array(array=action, name=\"action\", expected_dim=1)\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\n- balancing_weight = self.predict(\n- context=context,\n- action=action,\n- action_context=action_context,\n- position=position,\n- )\nreturn self._estimate_round_rewards(\nreward=reward,\naction=action,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | wip (balanced ope) |
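The "wip (balanced ope)" row above introduces a classifier-based balancing-weight model in the spirit of Sondhi et al. (2020): logged (context, action) pairs are labeled 0, pairs whose actions come from the evaluation policy are labeled 1, and the weight is read off a calibrated classifier as p / (1 - p). The following is a minimal standalone sketch of that idea on toy data; the variable names, the inline action-sampling helper, and the data itself are illustrative assumptions, and this is not the obp interface.

```python
# Sketch of the classifier-based balancing-weight idea (Sondhi et al. 2020):
# class 0 = logged behavior-policy pairs, class 1 = evaluation-policy pairs,
# weight = p / (1 - p) from a calibrated classifier. Toy data, not the obp API.
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n_rounds, n_actions, dim_context = 1000, 4, 5
context = rng.normal(size=(n_rounds, dim_context))
action = rng.integers(n_actions, size=n_rounds)                   # logged actions
action_dist = rng.dirichlet(np.ones(n_actions), size=n_rounds)    # pi_e(a|x), len_list = 1
action_context = np.eye(n_actions)                                # one-hot action features

# "sample" fitting method: draw one action per round from the evaluation policy
cum = action_dist.cumsum(axis=1)
sampled_action = np.minimum(
    (cum < rng.random((n_rounds, 1))).sum(axis=1), n_actions - 1
)

X_behavior = np.c_[context, action_context[action]]           # label 0
X_evaluation = np.c_[context, action_context[sampled_action]]  # label 1
X = np.r_[X_behavior, X_evaluation]
y = np.r_[np.zeros(n_rounds, dtype=int), np.ones(n_rounds, dtype=int)]

clf = CalibratedClassifierCV(LogisticRegression(max_iter=1000), cv=2)
clf.fit(X, y)

# balancing weight for each logged round: hat{w}(x_t, a_t) = p / (1 - p)
p = np.clip(clf.predict_proba(X_behavior)[:, 1], 1e-6, 1 - 1e-6)
balancing_weight = p / (1 - p)
print(balancing_weight[:5])
```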
641,006 | 29.11.2021 00:56:35 | -32,400 | 3257831052ad9aae82b2caa1b5ff8f7d1d4f3b18 | add estimated_pscore and fix past test | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/classification_model.py",
"new_path": "obp/ope/classification_model.py",
"diff": "@@ -261,7 +261,7 @@ class ImportanceSampler(BaseEstimator):\n`random_state` affects the ordering of the indices, which controls the randomness of each fold.\nSee https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html for the details.\n- is_eval_model: boolean, default=False\n+ is_eval_model: bool, default=False\nWhether the performance of the classification model is evaluated or not.\nWhen True is given, the predicted probability of the classification model and the true label of each fold is saved in `self.eval_result[fold]`\n@@ -309,6 +309,7 @@ class ImportanceSampler(BaseEstimator):\naction=action,\nposition=position,\naction_dist=action_dist,\n+ random_state=random_state,\n)\nreturn self.predict(context=context, action=action, position=position)\nelse:\n@@ -323,6 +324,7 @@ class ImportanceSampler(BaseEstimator):\naction=action[train_idx],\nposition=position[train_idx],\naction_dist=action_dist[train_idx],\n+ random_state=random_state,\n)\nimportance_sampling_ratio[test_idx] = self.predict(\ncontext=context[test_idx],\n@@ -595,7 +597,7 @@ class PropensityScoreEstimator(BaseEstimator):\n`random_state` affects the ordering of the indices, which controls the randomness of each fold.\nSee https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html for the details.\n- is_eval_model: boolean, default=False\n+ is_eval_model: bool, default=False\nWhether the performance of the classification model is evaluated or not.\nWhen True is given, the predicted probability of the classification model and the true label of each fold is saved in `self.eval_result[fold]`\n@@ -629,6 +631,7 @@ class PropensityScoreEstimator(BaseEstimator):\ncontext=context,\naction=action,\nposition=position,\n+ random_state=random_state,\n)\nreturn self.predict(context=context, action=action, position=position)\nelse:\n@@ -642,6 +645,7 @@ class PropensityScoreEstimator(BaseEstimator):\ncontext=context[train_idx],\naction=action[train_idx],\nposition=position[train_idx],\n+ random_state=random_state,\n)\npscore[test_idx] = self.predict(\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -449,6 +449,7 @@ class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n+ **kwargs,\n) -> float:\n\"\"\"Estimate the policy value of evaluation policy with a tuned hyperparameter.\n@@ -628,6 +629,7 @@ class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n+ **kwargs,\n) -> float:\n\"\"\"Estimate the policy value of evaluation policy with a tuned hyperparameter.\n@@ -807,6 +809,7 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\nposition: Optional[np.ndarray] = None,\n+ **kwargs,\n) -> float:\n\"\"\"Estimate the policy value of evaluation policy with a tuned hyperparameter.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "@@ -89,7 +89,7 @@ class OffPolicyEvaluation:\ndef __post_init__(self) -> None:\n\"\"\"Initialize class.\"\"\"\n- for key_ in [\"action\", \"position\", \"reward\", \"pscore\"]:\n+ for key_ in [\"action\", \"position\", \"reward\"]:\nif key_ not in self.bandit_feedback:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\nself.ope_estimators_ = dict()\n@@ -105,6 +105,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\n) -> Dict[str, Dict[str, np.ndarray]]:\n\"\"\"Create input dictionary to estimate policy value using subclasses of `BaseOffPolicyEstimator`\"\"\"\ncheck_array(array=action_dist, name=\"action_dist\", expected_dim=3)\n@@ -125,15 +126,28 @@ class OffPolicyEvaluation:\nraise ValueError(\n\"Expected `estimated_rewards_by_reg_model.shape == action_dist.shape`, but found it False\"\n)\n+ if estimated_pscore is not None:\n+ check_array(estimated_pscore, \"estimated_pscore\", expected_dim=1)\n+ if estimated_pscore.shape[0] != action_dist.shape[0]:\n+ raise ValueError(\n+ \"Expected `estimated_pscore.shape[0] == action_dist.shape[0]`, but found it False\"\n+ )\nestimator_inputs = {\nestimator_name: {\ninput_: self.bandit_feedback[input_]\n- for input_ in [\"reward\", \"action\", \"position\", \"pscore\"]\n+ for input_ in [\"reward\", \"action\", \"position\"]\n}\nfor estimator_name in self.ope_estimators_\n}\nfor estimator_name in self.ope_estimators_:\n+ if \"pscore\" in self.bandit_feedback:\n+ estimator_inputs[estimator_name][\"pscore\"] = self.bandit_feedback[\n+ \"pscore\"\n+ ]\n+ else:\n+ estimator_inputs[estimator_name][\"pscore\"] = None\n+ estimator_inputs[estimator_name][\"estimated_pscore\"] = estimated_pscore\nestimator_inputs[estimator_name][\"action_dist\"] = action_dist\nif isinstance(estimated_rewards_by_reg_model, dict):\nif estimator_name in estimated_rewards_by_reg_model:\n@@ -157,6 +171,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\n) -> Dict[str, float]:\n\"\"\"Estimate the policy value of evaluation policy.\n@@ -171,6 +186,9 @@ class OffPolicyEvaluation:\nWhen a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nReturns\n----------\npolicy_value_dict: Dict[str, float]\n@@ -187,12 +205,12 @@ class OffPolicyEvaluation:\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\npolicy_value_dict[estimator_name] = estimator.estimate_policy_value(\n**estimator_inputs[estimator_name]\n)\n-\nreturn policy_value_dict\ndef estimate_intervals(\n@@ -201,6 +219,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n@@ -218,6 +237,9 @@ class OffPolicyEvaluation:\nWhen a dict 
is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nalpha: float, default=0.05\nSignificance level.\n@@ -249,6 +271,7 @@ class OffPolicyEvaluation:\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\npolicy_value_interval_dict[estimator_name] = estimator.estimate_interval(\n@@ -266,6 +289,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 100,\nrandom_state: Optional[int] = None,\n@@ -283,6 +307,9 @@ class OffPolicyEvaluation:\nWhen a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nalpha: float, default=0.05\nSignificance level.\n@@ -302,6 +329,7 @@ class OffPolicyEvaluation:\nself.estimate_policy_values(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n),\nindex=[\"estimated_policy_value\"],\n)\n@@ -309,6 +337,7 @@ class OffPolicyEvaluation:\nself.estimate_intervals(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -333,6 +362,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nis_relative: bool = False,\nn_bootstrap_samples: int = 100,\n@@ -353,6 +383,9 @@ class OffPolicyEvaluation:\nWhen a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nalpha: float, default=0.05\nSignificance level.\n@@ -383,6 +416,7 @@ class OffPolicyEvaluation:\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_round_rewards_dict[\n@@ -422,6 +456,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\nmetric: str = \"relative-ee\",\n) -> Dict[str, float]:\n\"\"\"Evaluate estimation performance of OPE estimators.\n@@ -456,6 +491,9 @@ class OffPolicyEvaluation:\nWhen 
a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nmetric: str, default=\"relative-ee\"\nEvaluation metric used to evaluate and compare the estimation performance of OPE estimators.\nMust be \"relative-ee\" or \"se\".\n@@ -484,6 +522,7 @@ class OffPolicyEvaluation:\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_policy_value = estimator.estimate_policy_value(\n@@ -505,6 +544,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\nmetric: str = \"relative-ee\",\n) -> DataFrame:\n\"\"\"Summarize performance comparisons of OPE estimators.\n@@ -522,6 +562,9 @@ class OffPolicyEvaluation:\nExpected rewards given context, action, and position estimated by regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nmetric: str, default=\"relative-ee\"\nEvaluation metric used to evaluate and compare the estimation performance of OPE estimators.\nMust be either \"relative-ee\" or \"se\".\n@@ -550,6 +593,7 @@ class OffPolicyEvaluation:\nestimated_rewards_by_reg_model: Optional[\nUnion[np.ndarray, Dict[str, np.ndarray]]\n] = None,\n+ estimated_pscore: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nis_relative: bool = False,\nn_bootstrap_samples: int = 100,\n@@ -573,6 +617,9 @@ class OffPolicyEvaluation:\nWhen a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.\nWhen it is not given, model-dependent estimators such as DM and DR cannot be used.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nalpha: float, default=0.05\nSignificance level.\n@@ -611,6 +658,7 @@ class OffPolicyEvaluation:\nestimator_inputs = self._create_estimator_inputs(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nfor estimator_name, estimator in self.ope_estimators_.items():\nestimated_round_rewards_dict[estimator_name][\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta.py",
"new_path": "tests/ope/test_meta.py",
"diff": "@@ -133,6 +133,7 @@ class InverseProbabilityWeightingMock(BaseOffPolicyEstimator):\nposition: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\n+ estimated_pscore: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate the policy value of evaluation policy.\n@@ -154,6 +155,9 @@ class InverseProbabilityWeightingMock(BaseOffPolicyEstimator):\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nReturns\n----------\nmock_policy_value: float\n@@ -168,6 +172,7 @@ class InverseProbabilityWeightingMock(BaseOffPolicyEstimator):\nposition: np.ndarray,\npscore: np.ndarray,\naction_dist: np.ndarray,\n+ estimated_pscore: Optional[np.ndarray] = None,\nalpha: float = 0.05,\nn_bootstrap_samples: int = 10000,\nrandom_state: Optional[int] = None,\n@@ -193,6 +198,9 @@ class InverseProbabilityWeightingMock(BaseOffPolicyEstimator):\nAction choice probabilities\nby the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n+ estimated_pscore: array-like, shape (n_rounds,), default=None\n+ Estimated value of action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n+\nalpha: float, default=0.05\nSignificance level.\n@@ -236,7 +244,7 @@ def test_meta_post_init(synthetic_bandit_feedback: BanditFeedback) -> None:\n\"ipw3\": ipw3,\n}, \"__post_init__ returns a wrong value\"\n# __post__init__ raises RuntimeError when necessary_keys are not included in the bandit_feedback\n- necessary_keys = [\"action\", \"position\", \"reward\", \"pscore\"]\n+ necessary_keys = [\"action\", \"position\", \"reward\"]\nfor i in range(len(necessary_keys)):\nfor deleted_keys in itertools.combinations(necessary_keys, i + 1):\ninvalid_bandit_feedback_dict = {key: \"_\" for key in necessary_keys}\n@@ -393,6 +401,7 @@ def test_meta_create_estimator_inputs_using_valid_input_data(\n\"position\",\n\"action_dist\",\n\"estimated_rewards_by_reg_model\",\n+ \"estimated_pscore\",\n]\n), f\"Invalid response of _create_estimator_inputs (test case: {description})\"\n# _create_estimator_inputs function is called in the following functions\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add estimated_pscore and fix past test |
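The row above threads an `estimated_pscore` argument through `OffPolicyEvaluation`, so estimators can be run when the true behavior-policy propensities are not logged. As a self-contained sketch of what that enables, the snippet below fits a multinomial classifier as a propensity model and plugs its probabilities into a plain IPW value; the toy data and model choice are assumptions, and the obp meta class itself is not called here.

```python
# Sketch: estimate pi_b_hat(a|x) from logged data and use it in place of the
# true pscore inside an IPW-style value estimate. Illustration only.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n_rounds, n_actions, dim_context = 2000, 3, 5
context = rng.normal(size=(n_rounds, dim_context))
action = rng.integers(n_actions, size=n_rounds)
reward = rng.binomial(1, 0.3 + 0.1 * (action == 1))
action_dist = rng.dirichlet(np.ones(n_actions), size=n_rounds)  # pi_e(a|x), len_list = 1

# propensity model; assumes every action id appears in the log so that
# predict_proba columns line up with the action ids 0..n_actions-1
pscore_model = LogisticRegression(max_iter=1000).fit(context, action)
estimated_pscore = pscore_model.predict_proba(context)[np.arange(n_rounds), action]

# IPW with estimated propensities: V_hat = mean( pi_e(a_t|x_t) / pi_b_hat(a_t|x_t) * r_t )
iw = action_dist[np.arange(n_rounds), action] / estimated_pscore
print("estimated policy value:", (iw * reward).mean())
```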
641,006 | 29.11.2021 14:06:15 | -32,400 | 0774cfd933f07edce9d313e5a5f2fa473855927a | fix test meta | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_meta.py",
"new_path": "tests/ope/test_meta.py",
"diff": "@@ -402,6 +402,7 @@ def test_meta_create_estimator_inputs_using_valid_input_data(\n\"action_dist\",\n\"estimated_rewards_by_reg_model\",\n\"estimated_pscore\",\n+ \"importance_sampling_ratio\",\n]\n), f\"Invalid response of _create_estimator_inputs (test case: {description})\"\n# _create_estimator_inputs function is called in the following functions\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix test meta |
641,014 | 19.12.2021 22:05:16 | 18,000 | dffcafbb020d081e4a71c13adbc54a95c8352e0f | implement SLOPE for hyperparam tuning of ope | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -18,6 +18,7 @@ from .estimators import DoublyRobust\nfrom .estimators import DoublyRobustWithShrinkage\nfrom .estimators import InverseProbabilityWeighting\nfrom .estimators import SwitchDoublyRobust\n+from .helper import estimate_student_t_lower_bound\n@dataclass\n@@ -31,6 +32,12 @@ class BaseOffPolicyEstimatorTuning:\nlambdas: List[float]\nA list of candidate hyperparameter values.\n+ tuning_method: str, default=\"slope\".\n+ A method used to tune the hyperparameter of an OPE estimator.\n+ Must be either of \"slope\" or \"mse\".\n+ Note that the implementation of \"slope\" is based on SLOPE++ proposed by Tucker and Lee.(2021),\n+ which improves the original SLOPE proposed by Su et al.(2020).\n+\nuse_bias_upper_bound: bool, default=True\nWhether to use bias upper bound in hyperparameter tuning.\nIf False, direct bias estimator is used to estimate the MSE.\n@@ -46,10 +53,17 @@ class BaseOffPolicyEstimatorTuning:\nYi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.\n\"Doubly Robust Off-Policy Evaluation with Shrinkage.\", 2020.\n+ Yi Su, Pavithra Srinath, and Akshay Krishnamurthy.\n+ \"Adaptive Estimator Selection for Off-Policy Evaluation.\", 2020.\n+\n+ George Tucker and Jonathan Lee.\n+ \"Improved Estimator Selection for Off-Policy Evaluation.\", 2021.\n+\n\"\"\"\nbase_ope_estimator: BaseOffPolicyEstimator = field(init=False)\nlambdas: List[float] = None\n+ tuning_method: str = \"slope\"\nuse_bias_upper_bound: bool = True\ndelta: float = 0.05\n@@ -76,14 +90,19 @@ class BaseOffPolicyEstimatorTuning:\ndef _check_init_inputs(self) -> None:\n\"\"\"Initialize Class.\"\"\"\n+ if self.tuning_method not in [\"slope\", \"mse\"]:\n+ raise ValueError(\n+ \"`tuning_method` must be either 'slope' or 'mse'\"\n+ f\", but {self.tuning_method} is given\"\n+ )\nif not isinstance(self.use_bias_upper_bound, bool):\nraise TypeError(\n\"`use_bias_upper_bound` must be a bool\"\n- \", but {type(self.use_bias_upper_bound)} is given\"\n+ f\", but {type(self.use_bias_upper_bound)} is given\"\n)\ncheck_scalar(self.delta, \"delta\", (float), min_val=0.0, max_val=1.0)\n- def _tune_hyperparam(\n+ def _tune_hyperparam_with_mse(\nself,\nreward: np.ndarray,\naction: np.ndarray,\n@@ -91,8 +110,8 @@ class BaseOffPolicyEstimatorTuning:\naction_dist: np.ndarray,\nestimated_rewards_by_reg_model: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\n- ) -> None:\n- \"\"\"Find the best hyperparameter value from the given candidate set.\"\"\"\n+ ) -> float:\n+ \"\"\"Find the best hyperparameter value from the candidate set by estimating the mse.\"\"\"\nself.estimated_mse_score_dict = dict()\nfor hyperparam_ in self.lambdas:\nestimated_mse_score = self.base_ope_estimator(\n@@ -108,9 +127,55 @@ class BaseOffPolicyEstimatorTuning:\ndelta=self.delta,\n)\nself.estimated_mse_score_dict[hyperparam_] = estimated_mse_score\n- self.best_hyperparam = min(\n- self.estimated_mse_score_dict.items(), key=lambda x: x[1]\n- )[0]\n+ return min(self.estimated_mse_score_dict.items(), key=lambda x: x[1])[0]\n+\n+ def _tune_hyperparam_with_slope(\n+ self,\n+ reward: np.ndarray,\n+ action: np.ndarray,\n+ pscore: np.ndarray,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: Optional[np.ndarray] = None,\n+ position: Optional[np.ndarray] = None,\n+ ) -> float:\n+ \"\"\"Find the best hyperparameter value from the candidate set by SLOPE.\"\"\"\n+ C = np.sqrt(6) - 1\n+ theta_list, cnf_list = [], []\n+ theta_list_for_sort, cnf_list_for_sort = [], []\n+ for hyperparam_ in self.lambdas:\n+ 
estimated_round_rewards = self.base_ope_estimator(\n+ hyperparam_\n+ )._estimate_round_rewards(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\n+ theta_list_for_sort.append(estimated_round_rewards.mean())\n+ cnf = estimated_round_rewards.mean()\n+ cnf -= estimate_student_t_lower_bound(\n+ x=estimated_round_rewards,\n+ delta=self.delta,\n+ )\n+ cnf_list_for_sort.append(cnf)\n+\n+ sorted_idx_list = np.argsort(cnf_list_for_sort)[::-1]\n+ for i, idx in enumerate(sorted_idx_list):\n+ cnf_i = cnf_list_for_sort[idx]\n+ theta_i = theta_list_for_sort[idx]\n+ if len(theta_list) < 1:\n+ theta_list.append(theta_i), cnf_list.append(cnf_i)\n+ else:\n+ theta_j, cnf_j = np.array(theta_list), np.array(cnf_list)\n+ if (np.abs(theta_j - theta_i) <= cnf_i + C * cnf_j).all():\n+ theta_list.append(theta_i), cnf_list.append(cnf_i)\n+ else:\n+ best_idx = sorted_idx_list[i - 1]\n+ return self.lambdas[best_idx]\n+\n+ return self.lambdas[sorted_idx_list[-1]]\ndef estimate_policy_value_with_tuning(\nself,\n@@ -151,7 +216,17 @@ class BaseOffPolicyEstimatorTuning:\n\"\"\"\n# tune hyperparameter if necessary\nif not hasattr(self, \"best_hyperparam\"):\n- self._tune_hyperparam(\n+ if self.tuning_method == \"mse\":\n+ self.best_hyperparam = self._tune_hyperparam_with_mse(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\n+ elif self.tuning_method == \"slope\":\n+ self.best_hyperparam = self._tune_hyperparam_with_slope(\nreward=reward,\naction=action,\npscore=pscore,\n@@ -221,7 +296,17 @@ class BaseOffPolicyEstimatorTuning:\n\"\"\"\n# tune hyperparameter if necessary\nif not hasattr(self, \"best_hyperparam\"):\n- self._tune_hyperparam(\n+ if self.tuning_method == \"mse\":\n+ self.best_hyperparam = self._tune_hyperparam_with_mse(\n+ reward=reward,\n+ action=action,\n+ pscore=pscore,\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ position=position,\n+ )\n+ elif self.tuning_method == \"slope\":\n+ self.best_hyperparam = self._tune_hyperparam_with_slope(\nreward=reward,\naction=action,\npscore=pscore,\n@@ -250,8 +335,14 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\n----------\nlambdas: List[float]\nA list of candidate clipping hyperparameters.\n- The automatic hyperparameter tuning proposed by Su et al.(2020)\n- will choose the best hyperparameter value from the data.\n+ The automatic hyperparameter tuning procedure proposed by Su et al.(2020)\n+ or Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+\n+ tuning_method: str, default=\"slope\".\n+ A method used to tune the hyperparameter of an OPE estimator.\n+ Must be either of \"slope\" or \"mse\".\n+ Note that the implementation of \"slope\" is based on SLOPE++ proposed by Tucker and Lee.(2021),\n+ which improves the original SLOPE proposed by Su et al.(2020).\nuse_bias_upper_bound: bool, default=True\nWhether to use bias upper bound in hyperparameter tuning.\n@@ -416,8 +507,14 @@ class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):\n----------\nlambdas: List[float]\nA list of candidate clipping hyperparameters.\n- The automatic hyperparameter tuning proposed by Su et al.(2020)\n- will choose the best hyperparameter value from the data.\n+ The automatic hyperparameter tuning procedure proposed by Su et 
al.(2020)\n+ or Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+\n+ tuning_method: str, default=\"slope\".\n+ A method used to tune the hyperparameter of an OPE estimator.\n+ Must be either of \"slope\" or \"mse\".\n+ Note that the implementation of \"slope\" is based on SLOPE++ proposed by Tucker and Lee.(2021),\n+ which improves the original SLOPE proposed by Su et al.(2020).\nestimator_name: str, default='dr'.\nName of the estimator.\n@@ -596,8 +693,14 @@ class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\n----------\nlambdas: List[float]\nA list of candidate switching hyperparameters.\n- The automatic hyperparameter tuning proposed by Su et al.(2020)\n- will choose the best hyperparameter value from the data.\n+ The automatic hyperparameter tuning procedure proposed by Su et al.(2020)\n+ or Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+\n+ tuning_method: str, default=\"slope\".\n+ A method used to tune the hyperparameter of an OPE estimator.\n+ Must be either of \"slope\" or \"mse\".\n+ Note that the implementation of \"slope\" is based on SLOPE++ proposed by Tucker and Lee.(2021),\n+ which improves the original SLOPE proposed by Su et al.(2020).\nestimator_name: str, default='switch-dr'.\nName of the estimator.\n@@ -775,8 +878,14 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\n----------\nlambdas: List[float]\nA list of candidate shrinkage hyperparameters.\n- The automatic hyperparameter tuning proposed by Su et al.(2020)\n- will choose the best hyperparameter value from the data.\n+ The automatic hyperparameter tuning procedure proposed by Su et al.(2020)\n+ or Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+\n+ tuning_method: str, default=\"slope\".\n+ A method used to tune the hyperparameter of an OPE estimator.\n+ Must be either of \"slope\" or \"mse\".\n+ Note that the implementation of \"slope\" is based on SLOPE++ proposed by Tucker and Lee.(2021),\n+ which improves the original SLOPE proposed by Su et al.(2020).\nestimator_name: str, default='dr-os'.\nName of the estimator.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement SLOPE for hyperparam tuning of ope |
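The SLOPE row above selects the hyperparameter by sorting candidates from widest to narrowest confidence interval and accepting them while their point estimates stay inside the inflated overlap region (with C = sqrt(6) - 1), as in `_tune_hyperparam_with_slope`. The sketch below re-implements just that selection rule over precomputed point estimates and interval half-widths; the toy `(theta, cnf)` values and the function name are made-up illustrations.

```python
# Minimal re-implementation of the SLOPE++ selection rule (Su et al. 2020;
# Tucker & Lee 2021) given, for each candidate lambda, a point estimate theta
# and a confidence-interval half-width cnf. Toy inputs, illustrative names.
import numpy as np

def slope_select(lambdas, theta, cnf):
    """Return the candidate chosen by the SLOPE++ stopping rule."""
    C = np.sqrt(6) - 1
    sorted_idx = np.argsort(cnf)[::-1]  # widest confidence interval first
    accepted_theta, accepted_cnf = [], []
    for i, idx in enumerate(sorted_idx):
        theta_i, cnf_i = theta[idx], cnf[idx]
        if accepted_theta:
            theta_j, cnf_j = np.array(accepted_theta), np.array(accepted_cnf)
            # stop at the first candidate whose estimate leaves the overlap region
            if not (np.abs(theta_j - theta_i) <= cnf_i + C * cnf_j).all():
                return lambdas[sorted_idx[i - 1]]
        accepted_theta.append(theta_i)
        accepted_cnf.append(cnf_i)
    return lambdas[sorted_idx[-1]]

# toy candidates: larger lambda -> narrower interval but potentially more bias
lambdas = [1.0, 10.0, 100.0, np.inf]
theta = np.array([0.52, 0.50, 0.47, 0.40])
cnf = np.array([0.20, 0.10, 0.04, 0.01])
print(slope_select(lambdas, theta, cnf))
```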
641,006 | 31.12.2021 22:34:14 | -32,400 | eb9fa0de226e62aa473a01411ce5c0ec02f1a6bf | fix test of ipw and dr; add test of importance weight estimator; fix importance weight estimator | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/classification_model.py",
"new_path": "obp/ope/classification_model.py",
"diff": "@@ -13,7 +13,7 @@ from sklearn.utils import check_random_state\nfrom sklearn.utils import check_scalar\nfrom sklearn.calibration import CalibratedClassifierCV\n-from ..utils import check_array, sample_action_fast\n+from ..utils import check_array, sample_action_fast, check_bandit_feedback_inputs\n@dataclass\n@@ -36,12 +36,14 @@ class ImportanceWeightEstimator(BaseEstimator):\naction_context: array-like, shape (n_actions, dim_action_context), default=None\nContext vector characterizing action (i.e., vector representation of each action).\nIf not given, one-hot encoding of the action variable is used as default.\n+ If fitting_method is 'raw', action_context must be None.\nfitting_method: str, default='sample'\nMethod to fit the classification model.\nMust be one of ['sample', 'raw']. Each method is defined as follows:\n- sample: actions are sampled by applying Gumbel-softmax trick to action_dist_at_position, and action features are represented by one-hot encoding of the sampled action.\n- raw: action_dist_at_position are directly encoded as action features.\n+ If fitting_method is 'raw', action_context must be None.\ncalibration_cv: int, default=2\nNumber of folds in the calibration procedure.\n@@ -73,6 +75,12 @@ class ImportanceWeightEstimator(BaseEstimator):\nraise ValueError(\nf\"fitting_method must be either 'sample' or 'raw', but {self.fitting_method} is given\"\n)\n+ if self.fitting_method == \"raw\" and self.action_context is not None:\n+ check_array(array=self.action_context, name=\"action_context\", expected_dim=2)\n+ if self.action_context.shape != (self.n_actions, self.n_actions) or not np.allclose(self.action_context, np.eye(self.n_actions)):\n+ raise ValueError(\n+ \"If fitting_method == 'raw', action_context must be None or identity matrix of size (n_actions, n_actions).\"\n+ )\nif not isinstance(self.base_model, BaseEstimator):\nraise ValueError(\n\"base_model must be BaseEstimator or a child class of BaseEstimator\"\n@@ -124,12 +132,14 @@ class ImportanceWeightEstimator(BaseEstimator):\n`random_state` affects the sampling of actions from the evaluation policy.\n\"\"\"\n- check_array(array=context, name=\"context\", expected_dim=2)\n- check_array(array=action, name=\"action\", expected_dim=1)\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=np.zeros_like(action), # use dummy reward\n+ position=position,\n+ action_context=self.action_context,\n+ )\ncheck_array(array=action_dist, name=\"action_dist\", expected_dim=3)\n- if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\n- raise ValueError(\"action elements must be non-negative integers\")\n-\nn_rounds = context.shape[0]\nif position is None or self.len_list == 1:\n@@ -148,7 +158,7 @@ class ImportanceWeightEstimator(BaseEstimator):\nraise ValueError(\"action_dist must be a probability distribution\")\n# If self.fitting_method != \"sample\", `sampled_action` has no information\n- sampled_action = np.zeros((n_rounds, self.n_actions, self.len_list))\n+ sampled_action = np.zeros(n_rounds, dtype=int)\nif self.fitting_method == \"sample\":\nfor position_ in np.arange(self.len_list):\nidx = position == position_\n@@ -156,11 +166,7 @@ class ImportanceWeightEstimator(BaseEstimator):\naction_dist=action_dist[idx][:, :, position_],\nrandom_state=random_state,\n)\n- sampled_action[\n- idx,\n- sampled_action_at_position,\n- position_,\n- ] = 1\n+ sampled_action[idx] = sampled_action_at_position\nfor position_ in np.arange(self.len_list):\nidx = position == position_\n@@ -169,7 
+175,7 @@ class ImportanceWeightEstimator(BaseEstimator):\ncontext=context[idx],\naction=action[idx],\naction_dist_at_position=action_dist_at_position,\n- sampled_action_at_position=sampled_action[idx][:, :, position_],\n+ sampled_action_at_position=sampled_action[idx],\n)\nif X.shape[0] == 0:\nraise ValueError(f\"No training data at position {position_}\")\n@@ -267,11 +273,14 @@ class ImportanceWeightEstimator(BaseEstimator):\nImportance weights estimated via supervised classification, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\n\"\"\"\n- check_array(array=context, name=\"context\", expected_dim=2)\n- check_array(array=action, name=\"action\", expected_dim=1)\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=np.zeros_like(action), # use dummy reward\n+ position=position,\n+ action_context=self.action_context,\n+ )\ncheck_array(array=action_dist, name=\"action_dist\", expected_dim=3)\n- if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\n- raise ValueError(\"action elements must be non-negative integers\")\nn_rounds = context.shape[0]\n@@ -323,9 +332,7 @@ class ImportanceWeightEstimator(BaseEstimator):\nposition=position[test_idx],\n)\nif evaluate_model_performance:\n- sampled_action = np.zeros(\n- (test_idx.shape[0], self.n_actions, self.len_list)\n- )\n+ sampled_action = np.zeros(test_idx.shape[0], dtype=int)\nif self.fitting_method == \"sample\":\nfor position_ in np.arange(self.len_list):\nidx = position[test_idx] == position_\n@@ -333,11 +340,7 @@ class ImportanceWeightEstimator(BaseEstimator):\naction_dist=action_dist[test_idx][idx][:, :, position_],\nrandom_state=random_state,\n)\n- sampled_action[\n- idx,\n- sampled_action_at_position,\n- position_,\n- ] = 1\n+ sampled_action[idx] = sampled_action_at_position\nfor position_ in np.arange(self.len_list):\nidx = position[test_idx] == position_\naction_dist_at_position = action_dist[test_idx][idx][\n@@ -347,7 +350,7 @@ class ImportanceWeightEstimator(BaseEstimator):\ncontext=context[test_idx][idx],\naction=action[test_idx][idx],\naction_dist_at_position=action_dist_at_position,\n- sampled_action_at_position=sampled_action[idx][:, :, position_],\n+ sampled_action_at_position=sampled_action[idx],\n)\nproba_of_evaluation_policy = self.base_model_list[\nposition_\n@@ -395,7 +398,9 @@ class ImportanceWeightEstimator(BaseEstimator):\nif self.fitting_method == \"raw\":\nevaluation_policy_feature = np.c_[context, action_dist_at_position]\nelif self.fitting_method == \"sample\":\n- evaluation_policy_feature = np.c_[context, sampled_action_at_position]\n+ evaluation_policy_feature = np.c_[\n+ context, self.action_context[sampled_action_at_position]\n+ ]\nX = np.copy(behavior_policy_feature)\ny = np.zeros(X.shape[0], dtype=int)\nX = np.r_[X, evaluation_policy_feature]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -263,6 +263,27 @@ switch_dr_tuning = SwitchDoublyRobustTuning(\nlambdas=[1, 100], estimator_name=\"switch_dr_tuning\"\n)\nswitch_dr_max = SwitchDoublyRobust(lambda_=np.inf)\n+# estimated pscore\n+dr_estimated_pscore = DoublyRobust(use_estimated_pscore=True)\n+dr_os_estimated_pscore = DoublyRobustWithShrinkage(use_estimated_pscore=True)\n+dr_tuning_estimated_pscore = DoublyRobustTuning(\n+ lambdas=[1, 100],\n+ estimator_name=\"dr_tuning_estimated_pscore\",\n+ use_estimated_pscore=True,\n+)\n+dr_os_tuning_estimated_pscore = DoublyRobustWithShrinkageTuning(\n+ lambdas=[1, 100],\n+ estimator_name=\"dr_os_tuning_estimated_pscore\",\n+ use_estimated_pscore=True,\n+)\n+sndr_estimated_pscore = SelfNormalizedDoublyRobust(use_estimated_pscore=True)\n+switch_dr_estimated_pscore = SwitchDoublyRobust(use_estimated_pscore=True)\n+switch_dr_tuning_estimated_pscore = SwitchDoublyRobustTuning(\n+ lambdas=[1, 100],\n+ estimator_name=\"switch_dr_tuning_estimated_pscore\",\n+ use_estimated_pscore=True,\n+)\n+\ndr_estimators = [\ndr,\n@@ -272,6 +293,13 @@ dr_estimators = [\nsndr,\nswitch_dr_0,\nswitch_dr_tuning,\n+ dr_estimated_pscore,\n+ dr_os_estimated_pscore,\n+ dr_tuning_estimated_pscore,\n+ dr_os_tuning_estimated_pscore,\n+ sndr_estimated_pscore,\n+ switch_dr_estimated_pscore,\n+ switch_dr_tuning_estimated_pscore,\n]\n@@ -556,6 +584,7 @@ valid_input_of_dr_variants = [\nnp.random.uniform(low=0.5, high=1.0, size=5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n+ np.random.uniform(low=0.5, high=1.0, size=5),\n0.5,\n\"all arguments are given and len_list > 1\",\n)\n@@ -563,7 +592,7 @@ valid_input_of_dr_variants = [\[email protected](\n- \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, hyperparameter, description\",\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, estimated_pscore, hyperparameter, description\",\nvalid_input_of_dr_variants,\n)\ndef test_dr_variants_using_valid_input_data(\n@@ -573,6 +602,7 @@ def test_dr_variants_using_valid_input_data(\npscore: np.ndarray,\nposition: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ estimated_pscore: np.ndarray,\nhyperparameter: float,\ndescription: str,\n) -> None:\n@@ -585,7 +615,28 @@ def test_dr_variants_using_valid_input_data(\ndr_os_tuning = DoublyRobustWithShrinkageTuning(\nlambdas=[hyperparameter, hyperparameter * 10]\n)\n- for estimator in [switch_dr, switch_dr_tuning, dr_os, dr_os_tuning]:\n+ switch_dr_estimated_pscore = SwitchDoublyRobust(\n+ lambda_=hyperparameter, use_estimated_pscore=True\n+ )\n+ switch_dr_tuning_estimated_pscore = SwitchDoublyRobustTuning(\n+ lambdas=[hyperparameter, hyperparameter * 10], use_estimated_pscore=True\n+ )\n+ dr_os_estimated_pscore = DoublyRobustWithShrinkage(\n+ lambda_=hyperparameter, use_estimated_pscore=True\n+ )\n+ dr_os_tuning_estimated_pscore = DoublyRobustWithShrinkageTuning(\n+ lambdas=[hyperparameter, hyperparameter * 10], use_estimated_pscore=True\n+ )\n+ for estimator in [\n+ switch_dr,\n+ switch_dr_tuning,\n+ dr_os,\n+ dr_os_tuning,\n+ switch_dr_estimated_pscore,\n+ switch_dr_tuning_estimated_pscore,\n+ dr_os_estimated_pscore,\n+ dr_os_tuning_estimated_pscore,\n+ ]:\nest = estimator.estimate_policy_value(\naction_dist=action_dist,\naction=action,\n@@ -593,6 +644,7 @@ def test_dr_variants_using_valid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nassert est == 0.0, f\"policy value must be 0, but 
{est}\"\n@@ -613,6 +665,7 @@ def test_dr_using_random_evaluation_policy(\n}\ninput_dict[\"action_dist\"] = action_dist\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n+ input_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n# dr estimators require all arguments\nfor estimator in dr_estimators:\nestimated_policy_value = estimator.estimate_policy_value(**input_dict)\n@@ -651,7 +704,7 @@ def test_boundedness_of_sndr_using_random_evaluation_policy(\ninput_dict[\"action_dist\"] = action_dist\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n# make pscore too small (to check the boundedness of sndr)\n- input_dict[\"pscore\"] = input_dict[\"pscore\"] ** 3\n+ input_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\nestimated_policy_value = sndr.estimate_policy_value(**input_dict)\nassert (\nestimated_policy_value <= 2\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_importance_weight_estimator.py",
"diff": "+from pathlib import Path\n+from typing import Dict\n+\n+from conftest import generate_action_dist\n+import numpy as np\n+import pytest\n+from sklearn.base import BaseEstimator\n+from sklearn.ensemble import GradientBoostingClassifier\n+from sklearn.ensemble import RandomForestClassifier\n+from sklearn.linear_model import LogisticRegression\n+from sklearn.metrics import roc_auc_score\n+import yaml\n+\n+from obp.ope import ImportanceWeightEstimator\n+from obp.types import BanditFeedback\n+\n+\n+np.random.seed(1)\n+\n+binary_model_dict = dict(\n+ logistic_regression=LogisticRegression,\n+ lightgbm=GradientBoostingClassifier,\n+ random_forest=RandomForestClassifier,\n+)\n+\n+# hyperparameter settings for the base ML model in importance weight estimator\n+cd_path = Path(__file__).parent.resolve()\n+with open(cd_path / \"hyperparams.yaml\", \"rb\") as f:\n+ hyperparams = yaml.safe_load(f)\n+\n+\n+# action_context, n_actions, len_list, fitting_method, base_model, calibration_cv, err, description\n+n_rounds = 1000\n+n_actions = 3\n+len_list = 3\n+\n+invalid_input_of_initializing_importance_weight_estimator = [\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ \"a\", #\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ TypeError,\n+ \"`n_actions` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ 1, #\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"`n_actions`= 1, must be >= 2\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ \"a\", #\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ TypeError,\n+ \"`len_list` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ 0, #\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"`len_list`= 0, must be >= 1\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ 1, #\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"fitting_method must be either 'sample' or 'raw', but 1 is given\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"awesome\", #\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"fitting_method must be either 'sample' or 'raw', but awesome is given\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)), #\n+ n_actions,\n+ len_list,\n+ \"raw\", #\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"If fitting_method == 'raw', action_context must be None or identity matrix\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ \"RandomForest\", #\n+ 2,\n+ ValueError,\n+ \"base_model must be BaseEstimator or a child class of BaseEstimator\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 1.5,\n+ TypeError,\n+ \"`calibration_cv` must be an instance of <class 'int'>, not <class 'float'>.\",\n+ ),\n+]\n+\n+\n+# context, action, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, calibration_cv, err, description\n+invalid_input_of_fitting_importance_weight_estimator = [\n+ (\n+ None, #\n+ 
np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"context must be 2D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ None, #\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7, 3)), #\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"context must be 2D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=(n_rounds, 3)), #\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice([\"1\", \"a\"], size=n_rounds), #\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice([-1, -3], size=n_rounds), #\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ \"3\", #\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=(n_rounds, 3)), #\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=n_rounds - 1), #\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"Expected `context.shape[0]\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice([\"a\", \"1\"], size=n_rounds), #\n+ None,\n+ n_actions,\n+ 
len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice([-1, -3], size=n_rounds), #\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds - 1), #\n+ None,\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"Expected `context.shape[0]\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds - 1), #\n+ None,\n+ None,\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"Expected `context.shape[0]\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ \"3\", #\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action_context must be 2D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8, 3)), #\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action_context must be 2D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ (np.arange(n_rounds) % n_actions) + 1, #\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ r\"action elements must be smaller than\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.ones((n_rounds, 2)), #\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.ones(n_rounds, dtype=int) * len_list, #\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position elements must be smaller than len_list\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ 
n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ None, #\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action_dist must be 3D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ np.zeros((n_rounds, n_actions, len_list - 1)), #\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"shape of action_dist must be (n_rounds, n_actions, len_list)\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ np.zeros((n_rounds, n_actions, len_list)), #\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action_dist must be a probability distribution\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ \"a\", #\n+ None,\n+ 2,\n+ TypeError,\n+ \"`n_folds` must be an instance of <class 'int'>, not <class 'str'>\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 0, #\n+ None,\n+ 2,\n+ ValueError,\n+ \"`n_folds`= 0, must be >= 1.\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ \"a\", #\n+ 2,\n+ ValueError,\n+ \"'a' cannot be used to seed a numpy.random.RandomState instance\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.zeros(n_rounds, dtype=int), #\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"No training data at position\",\n+ ),\n+]\n+\n+\n+valid_input_of_importance_weight_estimator = [\n+ (\n+ np.random.uniform(size=(n_rounds * 100, 7)),\n+ np.arange(n_rounds * 100) % n_actions,\n+ np.random.choice(len_list, size=n_rounds * 100),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds * 100, n_actions, len_list),\n+ 3,\n+ 1,\n+ 2,\n+ \"valid input with cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds * 100, 7)),\n+ np.arange(n_rounds * 100) % n_actions,\n+ np.random.choice(len_list, size=n_rounds * 100),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds * 
100, n_actions, len_list),\n+ 3,\n+ 2,\n+ 1,\n+ \"valid input with cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ len_list,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 1,\n+ 1,\n+ 2,\n+ \"valid input without cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ None,\n+ np.random.uniform(size=(n_actions, 8)),\n+ n_actions,\n+ 1,\n+ \"sample\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, 1),\n+ 1,\n+ 1,\n+ 2,\n+ \"valid input without position\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ None,\n+ None,\n+ n_actions,\n+ 1,\n+ \"raw\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, 1),\n+ 1,\n+ 1,\n+ 2,\n+ \"valid input without position when fitting_method is `raw`\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ None,\n+ n_actions,\n+ len_list,\n+ \"raw\",\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ generate_action_dist(n_rounds, n_actions, len_list),\n+ 1,\n+ 1,\n+ 2,\n+ \"valid input when fitting_method is `raw`\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_context, n_actions, len_list, fitting_method, base_model, calibration_cv, err, description\",\n+ invalid_input_of_initializing_importance_weight_estimator,\n+)\n+def test_initializing_importance_weight_estimator_using_invalid_input_data(\n+ action_context: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ fitting_method: str,\n+ base_model: BaseEstimator,\n+ calibration_cv: int,\n+ err,\n+ description: str,\n+) -> None:\n+ # initialization raises ValueError\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = ImportanceWeightEstimator(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ action_context=action_context,\n+ base_model=base_model,\n+ fitting_method=fitting_method,\n+ calibration_cv=calibration_cv,\n+ )\n+\n+\[email protected](\n+ \"context, action, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, calibration_cv, err, description\",\n+ invalid_input_of_fitting_importance_weight_estimator,\n+)\n+def test_fitting_importance_weight_estimator_using_invalid_input_data(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ position: np.ndarray,\n+ action_context: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ fitting_method: str,\n+ base_model: BaseEstimator,\n+ action_dist: np.ndarray,\n+ n_folds: int,\n+ random_state: int,\n+ calibration_cv: int,\n+ err,\n+ description: str,\n+) -> None:\n+ # fit_predict function raises ValueError\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ importance_weight_estimator = ImportanceWeightEstimator(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ action_context=action_context,\n+ base_model=base_model,\n+ fitting_method=fitting_method,\n+ calibration_cv=calibration_cv,\n+ )\n+ # train importance weight estimator on logged bandit feedback data\n+ _ = importance_weight_estimator.fit_predict(\n+ context=context,\n+ action=action,\n+ position=position,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ action_dist=action_dist,\n+ 
)\n+\n+\[email protected](\n+ \"context, action, position, action_context, n_actions, len_list, fitting_method, base_model, action_dist, n_folds, random_state, calibration_cv, description\",\n+ valid_input_of_importance_weight_estimator,\n+)\n+def test_importance_weight_estimator_using_valid_input_data(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ position: np.ndarray,\n+ action_context: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ fitting_method: str,\n+ base_model: BaseEstimator,\n+ action_dist: np.ndarray,\n+ n_folds: int,\n+ random_state: int,\n+ calibration_cv: int,\n+ description: str,\n+) -> None:\n+ # fit_predict\n+ importance_weight_estimator = ImportanceWeightEstimator(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ action_context=action_context,\n+ base_model=base_model,\n+ fitting_method=fitting_method,\n+ calibration_cv=calibration_cv,\n+ )\n+ # train importance weight estimator on logged bandit feedback data\n+ _ = importance_weight_estimator.fit_predict(\n+ context=context,\n+ action=action,\n+ action_dist=action_dist,\n+ position=position,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+\n+\n+def test_performance_of_binary_outcome_models(\n+ fixed_synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n+) -> None:\n+ \"\"\"\n+ Test the performance of ope estimators using synthetic bandit data and random evaluation policy\n+ when the importance weight estimator is estimated by a logistic regression\n+ \"\"\"\n+ bandit_feedback = fixed_synthetic_bandit_feedback.copy()\n+ action_dist = random_action_dist\n+ random_state = 12345\n+ auc_scores: Dict[str, float] = {}\n+ fit_methods = [\"sample\", \"raw\"]\n+ for fit_method in fit_methods:\n+ for model_name, model in binary_model_dict.items():\n+ importance_weight_estimator = ImportanceWeightEstimator(\n+ n_actions=bandit_feedback[\"n_actions\"],\n+ action_context=bandit_feedback[\"action_context\"],\n+ base_model=model(**hyperparams[model_name]),\n+ fitting_method=fit_method,\n+ len_list=1,\n+ )\n+ # train importance weight estimator on logged bandit feedback data\n+ estimated_importance_weight = importance_weight_estimator.fit_predict(\n+ context=bandit_feedback[\"context\"],\n+ action=bandit_feedback[\"action\"],\n+ action_dist=action_dist,\n+ n_folds=2, # 2-fold cross-fitting\n+ random_state=random_state,\n+ evaluate_model_performance=True,\n+ )\n+ assert np.all(\n+ estimated_importance_weight >= 0\n+ ), \"estimated_importance_weight must be non-negative\"\n+ # extract predictions\n+ tmp_y = []\n+ tmp_pred = []\n+ for i in range(len(importance_weight_estimator.eval_result[\"y\"])):\n+ tmp_y.append(importance_weight_estimator.eval_result[\"y\"][i])\n+ tmp_pred.append(importance_weight_estimator.eval_result[\"proba\"][i])\n+ y_test = np.array(tmp_y).flatten()\n+ y_pred = np.array(tmp_pred).flatten()\n+ auc_scores[model_name + \"_\" + fit_method] = roc_auc_score(\n+ y_true=y_test,\n+ y_score=y_pred,\n+ )\n+\n+ for model_name in auc_scores:\n+ print(f\"AUC of {model_name} is {auc_scores[model_name]}\")\n+ assert (\n+ auc_scores[model_name] > 0.5\n+ ), f\"AUC of {model_name} should be greater than 0.5\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators.py",
"new_path": "tests/ope/test_ipw_estimators.py",
"diff": "@@ -461,6 +461,26 @@ def test_ipw_using_random_evaluation_policy(\nassert isinstance(\nestimated_policy_value, float\n), f\"invalid type response: {estimator}\"\n+\n+ # ipw with estimated pscore\n+ ipw_estimated_pscore = InverseProbabilityWeighting(use_estimated_pscore=True)\n+ snipw_estimated_pscore = SelfNormalizedInverseProbabilityWeighting(\n+ use_estimated_pscore=True\n+ )\n+ ipw_tuning_estimated_pscore = InverseProbabilityWeightingTuning(\n+ lambdas=[10, 1000], use_estimated_pscore=True\n+ )\n+ input_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n+ for estimator in [\n+ ipw_estimated_pscore,\n+ snipw_estimated_pscore,\n+ ipw_tuning_estimated_pscore,\n+ ]:\n+ estimated_policy_value = estimator.estimate_policy_value(**input_dict)\n+ assert isinstance(\n+ estimated_policy_value, float\n+ ), f\"invalid type response: {estimator}\"\n+\n# remove necessary keys\ndel input_dict[\"reward\"]\ndel input_dict[\"action\"]\n@@ -496,3 +516,13 @@ def test_boundedness_of_snipw_using_random_evaluation_policy(\nassert (\nestimated_policy_value <= 1\n), f\"estimated policy value of snipw should be smaller than or equal to 1 (because of its 1-boundedness), but the value is: {estimated_policy_value}\"\n+\n+ # ipw with estimated pscore\n+ snipw_estimated_pscore = SelfNormalizedInverseProbabilityWeighting(\n+ use_estimated_pscore=True\n+ )\n+ input_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n+ estimated_policy_value = snipw_estimated_pscore.estimate_policy_value(**input_dict)\n+ assert (\n+ estimated_policy_value <= 1\n+ ), f\"estimated policy value of snipw should be smaller than or equal to 1 (because of its 1-boundedness), but the value is: {estimated_policy_value}\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix test of ipw and dr; add test of importance weight estimator; fix importance weight estimator |
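The tests added in this record pin down the public interface of `ImportanceWeightEstimator`: the constructor takes `n_actions`, `len_list`, an optional `action_context`, a scikit-learn `base_model`, a `fitting_method` of `"sample"` or `"raw"`, and `calibration_cv`, while `fit_predict` consumes the logged `context` and `action`, the evaluation policy's `action_dist`, an optional `position`, and cross-fitting controls. A minimal usage sketch assuming only that interface; the synthetic inputs and base-model hyperparameters below are illustrative stand-ins, not the values from `hyperparams.yaml`:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from obp.ope import ImportanceWeightEstimator

n_rounds, n_actions = 1000, 3
context = np.random.uniform(size=(n_rounds, 7))          # made-up logged contexts
action = np.arange(n_rounds) % n_actions                  # made-up logged actions
# uniform evaluation policy as a stand-in for a real action_dist (len_list = 1)
action_dist = np.ones((n_rounds, n_actions, 1)) / n_actions

iw_estimator = ImportanceWeightEstimator(
    n_actions=n_actions,
    len_list=1,  # position can then be left as None, as in the valid-input case above
    base_model=RandomForestClassifier(
        n_estimators=30, min_samples_leaf=10, random_state=12345  # illustrative settings
    ),
    fitting_method="sample",  # "raw" is the other option exercised by the tests
    calibration_cv=2,
)
estimated_importance_weights = iw_estimator.fit_predict(
    context=context,
    action=action,
    action_dist=action_dist,
    n_folds=2,           # 2-fold cross-fitting, as in the performance test above
    random_state=12345,
)
assert np.all(estimated_importance_weights >= 0)
```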
641,006 | 01.01.2022 01:59:16 | -32,400 | 36d4d86d8f56b78e5565ea8d7fd68e116b9af582 | add tests of propensity score estimator | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/classification_model.py",
"new_path": "obp/ope/classification_model.py",
"diff": "@@ -287,7 +287,6 @@ class ImportanceWeightEstimator(BaseEstimator):\nif position is None or self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- check_array(array=position, name=\"position\", expected_dim=1)\nif position.max() >= self.len_list:\nraise ValueError(\nf\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n@@ -424,10 +423,6 @@ class PropensityScoreEstimator(BaseEstimator):\nLength of a list of actions recommended in each impression.\nWhen Open Bandit Dataset is used, 3 should be set.\n- action_context: array-like, shape (n_actions, dim_action_context), default=None\n- Context vector characterizing action (i.e., vector representation of each action).\n- If not given, one-hot encoding of the action variable is used as default.\n-\ncalibration_cv: int, default=2\nNumber of folds in the calibration procedure.\nIf calibration_cv <= 1, classification model is not calibrated.\n@@ -442,7 +437,6 @@ class PropensityScoreEstimator(BaseEstimator):\nbase_model: BaseEstimator\nn_actions: int\nlen_list: int = 1\n- action_context: Optional[np.ndarray] = None\ncalibration_cv: int = 2\ndef __post_init__(self) -> None:\n@@ -468,8 +462,6 @@ class PropensityScoreEstimator(BaseEstimator):\nself.base_model_list = [\nclone(self.base_model) for _ in np.arange(self.len_list)\n]\n- if self.action_context is None:\n- self.action_context = np.eye(self.n_actions, dtype=int)\ndef fit(\nself,\n@@ -493,15 +485,17 @@ class PropensityScoreEstimator(BaseEstimator):\nWhen `len_list` > 1, this position argument has to be set.\n\"\"\"\n- check_array(array=context, name=\"context\", expected_dim=2)\n- check_array(array=action, name=\"action\", expected_dim=1)\n- if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\n- raise ValueError(\"action elements must be non-negative integers\")\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=np.zeros_like(action), # use dummy reward\n+ position=position,\n+ action_context=np.eye(self.n_actions, dtype=int),\n+ )\nif position is None or self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- check_array(array=position, name=\"position\", expected_dim=1)\nif position.max() >= self.len_list:\nraise ValueError(\nf\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n@@ -598,17 +592,19 @@ class PropensityScoreEstimator(BaseEstimator):\nEstimated propensity score, i.e., :math:`\\\\hat{\\\\pi}_b (a \\\\mid x)`.\n\"\"\"\n- check_array(array=context, name=\"context\", expected_dim=2)\n- check_array(array=action, name=\"action\", expected_dim=1)\n- if not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\n- raise ValueError(\"action elements must be non-negative integers\")\n+ check_bandit_feedback_inputs(\n+ context=context,\n+ action=action,\n+ reward=np.zeros_like(action), # use dummy reward\n+ position=position,\n+ action_context=np.eye(self.n_actions, dtype=int),\n+ )\nn_rounds = context.shape[0]\nif position is None or self.len_list == 1:\nposition = np.zeros_like(action)\nelse:\n- check_array(array=position, name=\"position\", expected_dim=1)\nif position.max() >= self.len_list:\nraise ValueError(\nf\"position elements must be smaller than len_list, but the maximum value is {position.max()} (>= {self.len_list})\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_propensity_score_estimator.py",
"diff": "+from pathlib import Path\n+from typing import Dict\n+\n+import numpy as np\n+import pytest\n+from sklearn.base import BaseEstimator\n+from sklearn.ensemble import GradientBoostingClassifier\n+from sklearn.ensemble import RandomForestClassifier\n+from sklearn.linear_model import LogisticRegression\n+from sklearn.metrics import roc_auc_score\n+import yaml\n+\n+from obp.ope import PropensityScoreEstimator\n+from obp.types import BanditFeedback\n+\n+\n+np.random.seed(1)\n+\n+binary_model_dict = dict(\n+ logistic_regression=LogisticRegression,\n+ lightgbm=GradientBoostingClassifier,\n+ random_forest=RandomForestClassifier,\n+)\n+\n+# hyperparameter settings for the base ML model in propensity score estimator\n+cd_path = Path(__file__).parent.resolve()\n+with open(cd_path / \"hyperparams.yaml\", \"rb\") as f:\n+ hyperparams = yaml.safe_load(f)\n+\n+\n+# n_actions, len_list, base_model, calibration_cv, err, description\n+n_rounds = 1000\n+n_actions = 3\n+len_list = 3\n+\n+invalid_input_of_initializing_propensity_score_estimator = [\n+ (\n+ \"a\", #\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ TypeError,\n+ \"`n_actions` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ ),\n+ (\n+ 1, #\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"`n_actions`= 1, must be >= 2\",\n+ ),\n+ (\n+ n_actions,\n+ \"a\", #\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ TypeError,\n+ \"`len_list` must be an instance of <class 'int'>, not <class 'str'>.\",\n+ ),\n+ (\n+ n_actions,\n+ 0, #\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 2,\n+ ValueError,\n+ \"`len_list`= 0, must be >= 1\",\n+ ),\n+ (\n+ n_actions,\n+ len_list,\n+ \"RandomForest\", #\n+ 2,\n+ ValueError,\n+ \"base_model must be BaseEstimator or a child class of BaseEstimator\",\n+ ),\n+ (\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 1.5,\n+ TypeError,\n+ \"`calibration_cv` must be an instance of <class 'int'>, not <class 'float'>.\",\n+ ),\n+]\n+\n+\n+# context, action, position, n_actions, len_list, base_model, n_folds, random_state, calibration_cv, err, description\n+invalid_input_of_fitting_propensity_score_estimator = [\n+ (\n+ None, #\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"context must be 2D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ None, #\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7, 3)), #\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"context must be 2D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=(n_rounds, 3)), #\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice([\"1\", \"a\"], size=n_rounds), #\n+ 
np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice([-1, -3], size=n_rounds), #\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ \"3\", #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=(n_rounds, 3)), #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice(len_list, size=n_rounds - 1), #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"Expected `context.shape[0]\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice([\"a\", \"1\"], size=n_rounds), #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds),\n+ np.random.choice([-1, -3], size=n_rounds), #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position elements must be non-negative integers\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds - 1), #\n+ None,\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"Expected `context.shape[0]\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.random.choice(n_actions, size=n_rounds - 1), #\n+ None,\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"Expected `context.shape[0]\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ (np.arange(n_rounds) % n_actions) + 1, #\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ r\"action elements must be smaller than\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.ones((n_rounds, 2)), #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position must be 1D array\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.ones(n_rounds, dtype=int) * len_list, #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"position elements must be smaller than len_list\",\n+ ),\n+ (\n+ 
np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ \"a\", #\n+ None,\n+ 2,\n+ TypeError,\n+ \"`n_folds` must be an instance of <class 'int'>, not <class 'str'>\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 0, #\n+ None,\n+ 2,\n+ ValueError,\n+ \"`n_folds`= 0, must be >= 1.\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ \"a\", #\n+ 2,\n+ ValueError,\n+ \"'a' cannot be used to seed a numpy.random.RandomState instance\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.zeros(n_rounds, dtype=int), #\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ ValueError,\n+ \"No training data at position\",\n+ ),\n+]\n+\n+\n+valid_input_of_propensity_score_estimator = [\n+ (\n+ np.random.uniform(size=(n_rounds * 100, 7)),\n+ np.arange(n_rounds * 100) % n_actions,\n+ np.random.choice(len_list, size=n_rounds * 100),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 1,\n+ 2,\n+ \"valid input with cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds * 100, 7)),\n+ np.arange(n_rounds * 100) % n_actions,\n+ np.random.choice(len_list, size=n_rounds * 100),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 3,\n+ 2,\n+ 1,\n+ \"valid input with cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ np.random.choice(len_list, size=n_rounds),\n+ n_actions,\n+ len_list,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 1,\n+ 1,\n+ 2,\n+ \"valid input without cross fitting\",\n+ ),\n+ (\n+ np.random.uniform(size=(n_rounds, 7)),\n+ np.arange(n_rounds) % n_actions,\n+ None,\n+ n_actions,\n+ 1,\n+ RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ 1,\n+ 1,\n+ 2,\n+ \"valid input without position\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"n_actions, len_list, base_model, calibration_cv, err, description\",\n+ invalid_input_of_initializing_propensity_score_estimator,\n+)\n+def test_initializing_propensity_score_estimator_using_invalid_input_data(\n+ n_actions: int,\n+ len_list: int,\n+ base_model: BaseEstimator,\n+ calibration_cv: int,\n+ err,\n+ description: str,\n+) -> None:\n+ # initialization raises ValueError\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = PropensityScoreEstimator(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ base_model=base_model,\n+ calibration_cv=calibration_cv,\n+ )\n+\n+\[email protected](\n+ \"context, action, position, n_actions, len_list, base_model, n_folds, random_state, calibration_cv, err, description\",\n+ invalid_input_of_fitting_propensity_score_estimator,\n+)\n+def test_fitting_propensity_score_estimator_using_invalid_input_data(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ position: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ base_model: BaseEstimator,\n+ n_folds: int,\n+ random_state: int,\n+ calibration_cv: int,\n+ err,\n+ description: str,\n+) -> 
None:\n+ # fit_predict function raises ValueError\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ propensity_score_estimator = PropensityScoreEstimator(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ base_model=base_model,\n+ calibration_cv=calibration_cv,\n+ )\n+ # train propensity score estimator on logged bandit feedback data\n+ _ = propensity_score_estimator.fit_predict(\n+ context=context,\n+ action=action,\n+ position=position,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+\n+\[email protected](\n+ \"context, action, position, n_actions, len_list, base_model, n_folds, random_state, calibration_cv, description\",\n+ valid_input_of_propensity_score_estimator,\n+)\n+def test_propensity_score_estimator_using_valid_input_data(\n+ context: np.ndarray,\n+ action: np.ndarray,\n+ position: np.ndarray,\n+ n_actions: int,\n+ len_list: int,\n+ base_model: BaseEstimator,\n+ n_folds: int,\n+ random_state: int,\n+ calibration_cv: int,\n+ description: str,\n+) -> None:\n+ # fit_predict\n+ propensity_score_estimator = PropensityScoreEstimator(\n+ n_actions=n_actions,\n+ len_list=len_list,\n+ base_model=base_model,\n+ calibration_cv=calibration_cv,\n+ )\n+ # train propensity score estimator on logged bandit feedback data\n+ _ = propensity_score_estimator.fit_predict(\n+ context=context,\n+ action=action,\n+ position=position,\n+ n_folds=n_folds,\n+ random_state=random_state,\n+ )\n+\n+\n+def test_performance_of_binary_outcome_models(\n+ fixed_synthetic_bandit_feedback: BanditFeedback,\n+) -> None:\n+ \"\"\"\n+ Test the performance of ope estimators using synthetic bandit data and random evaluation policy\n+ when the propensity score estimator is estimated by a logistic regression\n+ \"\"\"\n+ bandit_feedback = fixed_synthetic_bandit_feedback.copy()\n+ random_state = 12345\n+ auc_scores: Dict[str, float] = {}\n+ for model_name, model in binary_model_dict.items():\n+ propensity_score_estimator = PropensityScoreEstimator(\n+ n_actions=bandit_feedback[\"n_actions\"],\n+ base_model=model(**hyperparams[model_name]),\n+ len_list=1,\n+ )\n+ # train propensity score estimator on logged bandit feedback data\n+ estimated_propensity_score = propensity_score_estimator.fit_predict(\n+ context=bandit_feedback[\"context\"],\n+ action=bandit_feedback[\"action\"],\n+ n_folds=2, # 2-fold cross-fitting\n+ random_state=random_state,\n+ evaluate_model_performance=True,\n+ )\n+ assert np.all(\n+ estimated_propensity_score >= 0\n+ ), \"estimated_propensity_score must be non-negative\"\n+ # extract predictions\n+ tmp_y = []\n+ tmp_pred = []\n+ for i in range(len(propensity_score_estimator.eval_result[\"y\"])):\n+ tmp_y.append(propensity_score_estimator.eval_result[\"y\"][i])\n+ tmp_pred.append(propensity_score_estimator.eval_result[\"proba\"][i])\n+ y_test = np.array(tmp_y).flatten()\n+ y_pred = np.array(tmp_pred).reshape(-1, tmp_pred[0].shape[1])\n+ auc_scores[model_name] = roc_auc_score(\n+ y_true=y_test, y_score=y_pred, multi_class=\"ovo\"\n+ )\n+\n+ for model_name in auc_scores:\n+ print(f\"AUC (macro-ovo) of {model_name} is {auc_scores[model_name]}\")\n+ assert (\n+ auc_scores[model_name] > 0.5\n+ ), f\"AUC of {model_name} should be greater than 0.5\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add tests of propensity score estimator |
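`PropensityScoreEstimator` follows the same pattern but estimates the behavior policy \hat{\pi}_b(a|x) itself, so its `fit_predict` needs no `action_dist`. A minimal sketch assuming the interface shown in the valid-input cases of this commit; the base-model settings are illustrative rather than the repository's tuned hyperparameters:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from obp.ope import PropensityScoreEstimator

n_rounds, n_actions = 1000, 3
context = np.random.uniform(size=(n_rounds, 7))  # made-up logged contexts
action = np.arange(n_rounds) % n_actions         # made-up logged actions

pscore_estimator = PropensityScoreEstimator(
    n_actions=n_actions,
    len_list=1,  # position can then be left as None
    base_model=LogisticRegression(max_iter=1000, random_state=12345),  # illustrative
    calibration_cv=2,
)
estimated_pscore = pscore_estimator.fit_predict(
    context=context,
    action=action,
    n_folds=2,           # 2-fold cross-fitting
    random_state=12345,
)
# mirrors the non-negativity check used in the performance test of this commit
assert np.all(estimated_pscore >= 0)
```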
641,006 | 01.01.2022 17:33:41 | -32,400 | dda71f05e5755c2758bb8df16ce46baca50159be | add bipw tests | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -1810,13 +1810,13 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nclass BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\n\"\"\"Balanced Inverse Probability Weighting (IPW) Estimator.\n- Note (WIP)\n+ Note\n-------\nBalanced Inverse Probability Weighting (IPW) estimates the policy value of evaluation policy :math:`\\\\pi_e` by\n.. math::\n- \\\\hat{V}_{\\\\mathrm{B-IPW}} (\\\\pi_e; \\\\mathcal{D}) := \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\pi_e (a_t|x_t) \\\\hat{w}(x_t,a_t) r_t]}{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\pi_e (a_t|x_t) \\\\hat{w}(x_t,a_t)},\n+ \\\\hat{V}_{\\\\mathrm{B-IPW}} (\\\\pi_e; \\\\mathcal{D}) := \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\hat{w}(x_t,a_t) r_t]}{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\hat{w}(x_t,a_t)},\nwhere :math:`\\\\mathcal{D}=\\\\{(x_t,a_t,r_t)\\\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by\na behavior policy :math:`\\\\pi_b`.\n@@ -1829,6 +1829,15 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nBalanced IPW can be used even when the behavior policy (or the propensity score of the behavior policy) is not known or the behavior policy is deterministic.\nWhen the evaluation policy is stochastic, it is not well known whether the balanced IPW performs well.\n+\n+ Please note that the original estimator of B-IPW is defined as follows (only when the evaluation policy is deterministic):\n+\n+ .. math::\n+\n+ \\\\hat{V}_{\\\\mathrm{B-IPW}} (\\\\pi_e; \\\\mathcal{D}) := \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\pi_e (a_t|x_t) \\\\hat{w}(x_t,a_t) r_t]}{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\pi_e (a_t|x_t) \\\\hat{w}(x_t,a_t)},\n+\n+ where :math:`\\\\pi_e` is the evaluation policy, and our estimator is a bit different from the original estimator.\n+\nParameters\n------------\nlambda_: float, default=np.inf\n@@ -1910,7 +1919,6 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\naction_dist: np.ndarray,\nestimated_importance_weights: np.ndarray,\nposition: Optional[np.ndarray] = None,\n- action_context: Optional[np.ndarray] = None,\n**kwargs,\n) -> np.ndarray:\n\"\"\"Estimate the policy value of evaluation policy.\n@@ -1947,6 +1955,13 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nname=\"estimated_importance_weights\",\nexpected_dim=1,\n)\n+ check_ope_inputs(\n+ action_dist=action_dist,\n+ position=position,\n+ action=action,\n+ reward=reward,\n+ estimated_importance_weights=estimated_importance_weights,\n+ )\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\nreturn self._estimate_round_rewards(\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/ope/test_bipw_estimators.py",
"diff": "+import re\n+\n+from conftest import generate_action_dist\n+import numpy as np\n+import pytest\n+\n+from obp.ope import BalancedInverseProbabilityWeighting\n+from obp.types import BanditFeedback\n+\n+\n+# lambda_, err, description\n+invalid_input_of_bipw_init = [\n+ (\n+ \"\",\n+ TypeError,\n+ r\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ None,\n+ TypeError,\n+ r\"`lambda_` must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'NoneType'>.\",\n+ ),\n+ (-1.0, ValueError, \"`lambda_`= -1.0, must be >= 0.0.\"),\n+ (np.nan, ValueError, \"lambda_ must not be nan\"),\n+]\n+\n+\[email protected](\n+ \"lambda_, err, description\",\n+ invalid_input_of_bipw_init,\n+)\n+def test_bipw_init_using_invalid_inputs(\n+ lambda_,\n+ err,\n+ description,\n+):\n+ with pytest.raises(err, match=f\"{description}*\"):\n+ _ = BalancedInverseProbabilityWeighting(\n+ lambda_=lambda_,\n+ )\n+\n+\n+# prepare bipw instances\n+bipw = BalancedInverseProbabilityWeighting()\n+\n+# action_dist, action, reward, position, estimated_importance_weights, description\n+invalid_input_of_bipw = [\n+ (\n+ generate_action_dist(5, 4, 3),\n+ None, #\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ None, #\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"reward must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ None, #\n+ \"estimated_importance_weights must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=float), #\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int) - 1, #\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"action elements must be non-negative integers\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ \"4\", #\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros((3, 2), dtype=int), #\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"action must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int) + 8, #\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ r\"action elements must be smaller than`\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ \"4\", #\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"reward must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros((3, 2), dtype=int), #\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"reward must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(4, dtype=int), #\n+ np.random.choice(3, size=5),\n+ np.ones(5),\n+ \"Expected `action.shape[0]\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ \"4\", #\n+ \"estimated_importance_weights must be 1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones((5, 3)), #\n+ \"estimated_importance_weights must be 
1D array\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.ones(4), #\n+ \"Expected `action.shape[0]\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ np.arange(5) - 1, #\n+ \"estimated_importance_weights must be non-negative\",\n+ ),\n+ (\n+ generate_action_dist(5, 4, 3),\n+ np.zeros(5, dtype=int),\n+ np.zeros(5, dtype=int),\n+ np.random.choice(3, size=5),\n+ None, #\n+ \"estimated_importance_weights must be 1D array\",\n+ ),\n+]\n+\n+\[email protected](\n+ \"action_dist, action, reward, position, estimated_importance_weights, description\",\n+ invalid_input_of_bipw,\n+)\n+def test_bipw_using_invalid_input_data(\n+ action_dist: np.ndarray,\n+ action: np.ndarray,\n+ reward: np.ndarray,\n+ position: np.ndarray,\n+ estimated_importance_weights: np.ndarray,\n+ description: str,\n+) -> None:\n+ # prepare bipw instances\n+ bipw = BalancedInverseProbabilityWeighting()\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = bipw.estimate_policy_value(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ position=position,\n+ estimated_importance_weights=estimated_importance_weights,\n+ )\n+ with pytest.raises(ValueError, match=f\"{description}*\"):\n+ _ = bipw.estimate_interval(\n+ action_dist=action_dist,\n+ action=action,\n+ reward=reward,\n+ position=position,\n+ estimated_importance_weights=estimated_importance_weights,\n+ )\n+\n+\n+def test_bipw_using_random_evaluation_policy(\n+ synthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n+) -> None:\n+ \"\"\"\n+ Test the format of bipw variants using synthetic bandit data and random evaluation policy\n+ \"\"\"\n+ action_dist = random_action_dist\n+ # prepare input dict\n+ input_dict = {\n+ k: v\n+ for k, v in synthetic_bandit_feedback.items()\n+ if k in [\"reward\", \"action\", \"pscore\", \"position\"]\n+ }\n+ input_dict[\"action_dist\"] = action_dist\n+ # insert dummy values\n+ input_dict[\"estimated_importance_weights\"] = np.ones(action_dist.shape[0])\n+ # check responce\n+ for estimator in [bipw]:\n+ estimated_policy_value = estimator.estimate_policy_value(**input_dict)\n+ assert isinstance(\n+ estimated_policy_value, float\n+ ), f\"invalid type response: {estimator}\"\n+\n+ # make estimated_importance_weights too small (to check the boundedness of snbipw)\n+ input_dict[\"estimated_importance_weights\"] = input_dict[\"pscore\"] ** 3\n+ estimated_policy_value = bipw.estimate_policy_value(**input_dict)\n+ assert (\n+ estimated_policy_value <= 1\n+ ), f\"estimated policy value of bipw should be smaller than or equal to 1 (because of its 1-boundedness), but the value is: {estimated_policy_value}\"\n+\n+ # remove necessary keys\n+ del input_dict[\"reward\"]\n+ del input_dict[\"action\"]\n+ del input_dict[\"estimated_importance_weights\"]\n+ for estimator in [bipw]:\n+ with pytest.raises(\n+ TypeError,\n+ match=re.escape(\n+ \"estimate_policy_value() missing 3 required positional arguments: 'reward', 'action', and 'estimated_importance_weights'\"\n+ ),\n+ ):\n+ _ = estimator.estimate_policy_value(**input_dict)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_dr_estimators.py",
"new_path": "tests/ope/test_dr_estimators.py",
"diff": "@@ -674,7 +674,6 @@ def test_dr_using_random_evaluation_policy(\n), f\"invalid type response: {estimator}\"\n# remove necessary keys\ndel input_dict[\"reward\"]\n- del input_dict[\"pscore\"]\ndel input_dict[\"action\"]\ndel input_dict[\"estimated_rewards_by_reg_model\"]\nfor estimator in dr_estimators:\n@@ -704,7 +703,7 @@ def test_boundedness_of_sndr_using_random_evaluation_policy(\ninput_dict[\"action_dist\"] = action_dist\ninput_dict[\"estimated_rewards_by_reg_model\"] = expected_reward\n# make pscore too small (to check the boundedness of sndr)\n- input_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n+ input_dict[\"pscore\"] = input_dict[\"pscore\"] ** 3\nestimated_policy_value = sndr.estimate_policy_value(**input_dict)\nassert (\nestimated_policy_value <= 2\n@@ -765,3 +764,9 @@ def test_switch_dr_using_random_evaluation_policy(\nassert (\ndr_value == switch_dr_max_value\n), \"SwitchDR (lambda=1e10) should be the same as DoublyRobust\"\n+ input_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n+ del input_dict[\"pscore\"]\n+ dr_value_estimated_pscore = dr_estimated_pscore.estimate_policy_value(**input_dict)\n+ assert (\n+ dr_value == dr_value_estimated_pscore\n+ ), \"DoublyRobust with estimated_pscore (which is the same as pscore) should be the same as DoublyRobust\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators.py",
"new_path": "tests/ope/test_ipw_estimators.py",
"diff": "@@ -471,6 +471,7 @@ def test_ipw_using_random_evaluation_policy(\nlambdas=[10, 1000], use_estimated_pscore=True\n)\ninput_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n+ del input_dict[\"pscore\"]\nfor estimator in [\nipw_estimated_pscore,\nsnipw_estimated_pscore,\n@@ -522,6 +523,7 @@ def test_boundedness_of_snipw_using_random_evaluation_policy(\nuse_estimated_pscore=True\n)\ninput_dict[\"estimated_pscore\"] = input_dict[\"pscore\"]\n+ del input_dict[\"pscore\"]\nestimated_policy_value = snipw_estimated_pscore.estimate_policy_value(**input_dict)\nassert (\nestimated_policy_value <= 1\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add bipw tests |
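This commit also rewrites the B-IPW docstring to the self-normalized ratio \hat{V}_{B-IPW} = E_D[\hat{w}(x_t,a_t) r_t] / E_D[\hat{w}(x_t,a_t)], which is what the boundedness check in the new test exploits. A minimal sketch of calling the estimator with dummy inputs shaped like the test fixtures; in practice `estimated_importance_weights` would come from `ImportanceWeightEstimator.fit_predict` rather than random numbers:

```python
import numpy as np
from obp.ope import BalancedInverseProbabilityWeighting

n_rounds, n_actions = 5, 4
reward = np.random.binomial(1, 0.5, size=n_rounds)               # dummy rewards
action = np.random.choice(n_actions, size=n_rounds)              # dummy logged actions
action_dist = np.ones((n_rounds, n_actions, 1)) / n_actions      # uniform policy, len_list = 1
# stand-in weights; normally the output of ImportanceWeightEstimator.fit_predict
estimated_importance_weights = np.random.uniform(0.5, 2.0, size=n_rounds)

bipw = BalancedInverseProbabilityWeighting()  # default lambda_ = np.inf, i.e. no weight clipping
estimate = bipw.estimate_policy_value(
    reward=reward,
    action=action,
    action_dist=action_dist,
    estimated_importance_weights=estimated_importance_weights,
)
# with no clipping this should coincide with the ratio form in the docstring:
# E_D[w_hat * r] / E_D[w_hat]
print(estimate)
```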
641,006 | 01.01.2022 22:47:15 | -32,400 | 15080ddc9aafb59f77fdb5befed2111611999768 | add ope performance test | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -82,6 +82,7 @@ __all_estimators__ = [\n\"DoublyRobustWithShrinkage\",\n\"SwitchDoublyRobust\",\n\"SelfNormalizedDoublyRobust\",\n+ \"BalancedInverseProbabilityWeighting\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_all_estimators.py",
"new_path": "tests/ope/test_all_estimators.py",
"diff": "@@ -9,7 +9,7 @@ from obp.ope import OffPolicyEvaluation\nfrom obp.types import BanditFeedback\n-# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\n+# action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, estimated_pscore, estimated_importance_weights, description\ninvalid_input_of_estimation = [\n(\nNone, #\n@@ -18,6 +18,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nNone,\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"action_dist must be 3D array\",\n),\n(\n@@ -27,6 +29,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nNone,\nnp.zeros((5, 4, 1)),\n+ np.ones(5),\n+ np.ones(5),\n\"action_dist must be 3D array\",\n),\n(\n@@ -36,6 +40,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nNone,\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"action_dist must be a probability distribution\",\n),\n(\n@@ -45,6 +51,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\n\"4\", #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"position must be 1D array\",\n),\n(\n@@ -54,6 +62,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nnp.zeros((5, 4), dtype=int), #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"position must be 1D array\",\n),\n(\n@@ -63,6 +73,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nnp.zeros(5), #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"position elements must be non-negative integers\",\n),\n(\n@@ -72,6 +84,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nnp.zeros(5, dtype=int) - 1, #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"position elements must be non-negative integers\",\n),\n(\n@@ -81,6 +95,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nnp.zeros(4, dtype=int), #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"Expected `position.shape[0]\",\n),\n(\n@@ -90,6 +106,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nnp.ones(5, dtype=int) * 8, #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"position elements must be smaller than\",\n),\n(\n@@ -99,6 +117,8 @@ invalid_input_of_estimation = [\nnp.ones(5),\nNone, #\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"position elements must be given when\",\n),\n]\n@@ -111,6 +131,8 @@ valid_input_of_estimation = [\nnp.ones(5),\nnp.random.choice(3, size=5),\nnp.zeros((5, 4, 3)),\n+ np.ones(5),\n+ np.ones(5),\n\"all arguments are given and len_list > 1\",\n),\n(\n@@ -120,6 +142,8 @@ valid_input_of_estimation = [\nnp.ones(5),\nnp.zeros(5, dtype=int),\nnp.zeros((5, 4, 1)),\n+ np.ones(5),\n+ np.ones(5),\n\"all arguments are given and len_list == 1\",\n),\n(\n@@ -129,13 +153,15 @@ valid_input_of_estimation = [\nnp.ones(5),\nNone,\nnp.zeros((5, 4, 1)),\n+ np.ones(5),\n+ np.ones(5),\n\"position argument is None\",\n),\n]\[email protected](\n- \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\",\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, estimated_pscore, estimated_importance_weights, description\",\ninvalid_input_of_estimation,\n)\ndef test_estimation_of_all_estimators_using_invalid_input_data(\n@@ -145,6 +171,8 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\npscore: np.ndarray,\nposition: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ estimated_pscore: np.ndarray,\n+ estimated_importance_weights: np.ndarray,\ndescription: str,\n) -> None:\nall_estimators = ope.__all_estimators__\n@@ -166,6 +194,8 @@ def 
test_estimation_of_all_estimators_using_invalid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\n)\nassert est == 0.0, f\"policy value must be 0, but {est}\"\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n@@ -176,6 +206,8 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\n)\nfor estimator_tuning in estimators_tuning:\n@@ -187,6 +219,7 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nassert est == 0.0, f\"policy value must be 0, but {est}\"\nassert hasattr(\n@@ -203,6 +236,7 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\nassert hasattr(\nestimator_tuning, \"best_hyperparam\"\n@@ -213,7 +247,7 @@ def test_estimation_of_all_estimators_using_invalid_input_data(\[email protected](\n- \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, description\",\n+ \"action_dist, action, reward, pscore, position, estimated_rewards_by_reg_model, estimated_pscore, estimated_importance_weights, description\",\nvalid_input_of_estimation,\n)\ndef test_estimation_of_all_estimators_using_valid_input_data(\n@@ -223,6 +257,8 @@ def test_estimation_of_all_estimators_using_valid_input_data(\npscore: np.ndarray,\nposition: np.ndarray,\nestimated_rewards_by_reg_model: np.ndarray,\n+ estimated_pscore: np.ndarray,\n+ estimated_importance_weights: np.ndarray,\ndescription: str,\n) -> None:\nall_estimators = ope.__all_estimators__\n@@ -243,6 +279,8 @@ def test_estimation_of_all_estimators_using_valid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\n)\n_ = estimator.estimate_interval(\naction_dist=action_dist,\n@@ -251,6 +289,8 @@ def test_estimation_of_all_estimators_using_valid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\n)\nfor estimator_tuning in estimators_tuning:\n_ = estimator_tuning.estimate_policy_value(\n@@ -260,6 +300,7 @@ def test_estimation_of_all_estimators_using_valid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\n_ = estimator_tuning.estimate_interval(\naction_dist=action_dist,\n@@ -268,6 +309,7 @@ def test_estimation_of_all_estimators_using_valid_input_data(\npscore=pscore,\nposition=position,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n)\n@@ -334,6 +376,9 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\ngetattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\nfor estimator_name in all_estimators_tuning\n]\n+ # TODO\n+ estimated_pscore = None\n+ 
estimated_importance_weights = np.ones(bandit_feedback[\"action\"].shape[0])\n# estimate_intervals function raises ValueError of all estimators\nfor estimator in estimators:\nwith pytest.raises(err, match=f\"{description}*\"):\n@@ -344,6 +389,8 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\npscore=bandit_feedback[\"pscore\"],\naction_dist=action_dist,\nestimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -357,6 +404,7 @@ def test_estimate_intervals_of_all_estimators_using_invalid_input_data(\npscore=bandit_feedback[\"pscore\"],\naction_dist=action_dist,\nestimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -391,6 +439,9 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\ngetattr(ope.estimators_tuning, estimator_name)([1, 100, 10000, np.inf])\nfor estimator_name in all_estimators_tuning\n]\n+ # TODO\n+ estimated_pscore = None\n+ estimated_importance_weights = np.ones(bandit_feedback[\"action\"].shape[0])\n# estimate_intervals function raises ValueError of all estimators\nfor estimator in estimators:\n_ = estimator.estimate_interval(\n@@ -400,6 +451,8 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\npscore=bandit_feedback[\"pscore\"],\naction_dist=action_dist,\nestimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -412,6 +465,7 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\npscore=bandit_feedback[\"pscore\"],\naction_dist=action_dist,\nestimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\nalpha=alpha,\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n@@ -460,12 +514,27 @@ def test_performance_of_ope_estimators_using_random_evaluation_policy(\nfor estimator_name in all_estimators_tuning\n]\nestimators = estimators_standard + estimators_tuning\n+ # skip estimation\n+ estimated_pscore = None\n+ estimated_importance_weights = (\n+ random_action_dist[\n+ np.arange(synthetic_bandit_feedback[\"action\"].shape[0]),\n+ synthetic_bandit_feedback[\"action\"],\n+ np.zeros(\n+ synthetic_bandit_feedback[\"action\"].shape[0], dtype=int\n+ ), # position is None\n+ ]\n+ / synthetic_bandit_feedback[\"pscore\"]\n+ )\n# conduct OPE\nope_instance = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=estimators\n)\nestimated_policy_value = ope_instance.estimate_policy_values(\n- action_dist=action_dist, estimated_rewards_by_reg_model=expected_reward\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\n)\n# check the performance of OPE\nprint(f\"gt_mean: {gt_mean}\")\n@@ -498,16 +567,33 @@ def test_response_format_of_ope_estimators_using_random_evaluation_policy(\nfor estimator_name in all_estimators_tuning\n]\nestimators = estimators_standard + estimators_tuning\n+ # skip estimation\n+ estimated_pscore = None\n+ estimated_importance_weights = (\n+ random_action_dist[\n+ np.arange(synthetic_bandit_feedback[\"action\"].shape[0]),\n+ 
synthetic_bandit_feedback[\"action\"],\n+ np.zeros(\n+ synthetic_bandit_feedback[\"action\"].shape[0], dtype=int\n+ ), # position is None\n+ ]\n+ / synthetic_bandit_feedback[\"pscore\"]\n+ )\n# conduct OPE\nope_instance = OffPolicyEvaluation(\nbandit_feedback=synthetic_bandit_feedback, ope_estimators=estimators\n)\nestimated_policy_value = ope_instance.estimate_policy_values(\n- action_dist=action_dist, estimated_rewards_by_reg_model=expected_reward\n+ action_dist=action_dist,\n+ estimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\n)\nestimated_intervals = ope_instance.estimate_intervals(\naction_dist=action_dist,\nestimated_rewards_by_reg_model=expected_reward,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights,\nrandom_state=12345,\n)\n# check the format of OPE\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -9,10 +9,12 @@ import pytest\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\n+from sklearn.svm import SVC\nimport torch\nfrom obp.dataset import logistic_reward_function\nfrom obp.dataset import SyntheticBanditDataset\n+from obp.ope import BalancedInverseProbabilityWeighting\nfrom obp.ope import DirectMethod\nfrom obp.ope import DoublyRobust\nfrom obp.ope import DoublyRobustTuning\n@@ -20,7 +22,9 @@ from obp.ope import DoublyRobustWithShrinkage\nfrom obp.ope import DoublyRobustWithShrinkageTuning\nfrom obp.ope import InverseProbabilityWeighting\nfrom obp.ope import InverseProbabilityWeightingTuning\n+from obp.ope import ImportanceWeightEstimator\nfrom obp.ope import OffPolicyEvaluation\n+from obp.ope import PropensityScoreEstimator\nfrom obp.ope import RegressionModel\nfrom obp.ope import SelfNormalizedDoublyRobust\nfrom obp.ope import SelfNormalizedInverseProbabilityWeighting\n@@ -29,7 +33,6 @@ from obp.ope import SwitchDoublyRobustTuning\nfrom obp.ope.estimators import BaseOffPolicyEstimator\nfrom obp.policy import IPWLearner\n-\n# hyperparameters of the regression model used in model dependent OPE estimators\nhyperparams = {\n\"lightgbm\": {\n@@ -50,6 +53,7 @@ hyperparams = {\n\"min_samples_leaf\": 10,\n\"random_state\": 12345,\n},\n+ \"svc\": {\"gamma\": 2, \"C\": 5, \"probability\": True, \"random_state\": 12345},\n}\nbase_model_dict = dict(\n@@ -65,6 +69,7 @@ offline_experiment_configurations = [\n5,\n\"logistic_regression\",\n\"logistic_regression\",\n+ \"logistic_regression\",\n),\n(\n300,\n@@ -72,6 +77,7 @@ offline_experiment_configurations = [\n2,\n\"lightgbm\",\n\"lightgbm\",\n+ \"lightgbm\",\n),\n(\n500,\n@@ -79,6 +85,7 @@ offline_experiment_configurations = [\n3,\n\"random_forest\",\n\"random_forest\",\n+ \"random_forest\",\n),\n(\n500,\n@@ -86,6 +93,7 @@ offline_experiment_configurations = [\n5,\n\"logistic_regression\",\n\"random_forest\",\n+ \"random_forest\",\n),\n(\n800,\n@@ -93,9 +101,29 @@ offline_experiment_configurations = [\n10,\n\"lightgbm\",\n\"logistic_regression\",\n+ \"logistic_regression\",\n),\n]\n+bipw_model_configurations = {\n+ \"bipw (random_forest raw)\": dict(\n+ fitting_method=\"raw\",\n+ base_model=RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ ),\n+ \"bipw (random_forest sample)\": dict(\n+ fitting_method=\"sample\",\n+ base_model=RandomForestClassifier(**hyperparams[\"random_forest\"]),\n+ ),\n+ \"bipw (svc raw)\": dict(\n+ fitting_method=\"raw\",\n+ base_model=SVC(**hyperparams[\"svc\"]),\n+ ),\n+ \"bipw (svc sample)\": dict(\n+ fitting_method=\"sample\",\n+ base_model=SVC(**hyperparams[\"svc\"]),\n+ ),\n+}\n+\n@dataclass\nclass NaiveEstimator(BaseOffPolicyEstimator):\n@@ -147,11 +175,37 @@ ope_estimators = [\nDoublyRobustWithShrinkageTuning(\nlambdas=[100, 1000, np.inf], estimator_name=\"dr-os (tuning)\"\n),\n+ InverseProbabilityWeighting(\n+ lambda_=100,\n+ estimator_name=\"cipw (estimated pscore)\",\n+ use_estimated_pscore=True,\n+ ),\n+ SelfNormalizedInverseProbabilityWeighting(\n+ estimator_name=\"snipw (estimated pscore)\", use_estimated_pscore=True\n+ ),\n+ DoublyRobust(estimator_name=\"dr (estimated pscore)\", use_estimated_pscore=True),\n+ DoublyRobustWithShrinkage(\n+ lambda_=500,\n+ estimator_name=\"dr-os (estimated pscore)\",\n+ use_estimated_pscore=True,\n+ ),\n+ BalancedInverseProbabilityWeighting(\n+ estimator_name=\"bipw (svc raw)\", lambda_=np.inf\n+ ),\n+ 
BalancedInverseProbabilityWeighting(\n+ estimator_name=\"bipw (svc sample)\", lambda_=np.inf\n+ ),\n+ BalancedInverseProbabilityWeighting(\n+ estimator_name=\"bipw (random_forest raw)\", lambda_=np.inf\n+ ),\n+ BalancedInverseProbabilityWeighting(\n+ estimator_name=\"bipw (random_forest sample)\", lambda_=np.inf\n+ ),\n]\[email protected](\n- \"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model\",\n+ \"n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model, base_model_for_treatment_model\",\noffline_experiment_configurations,\n)\ndef test_offline_estimation_performance(\n@@ -160,6 +214,7 @@ def test_offline_estimation_performance(\ndim_context: int,\nbase_model_for_evaluation_policy: str,\nbase_model_for_reg_model: str,\n+ base_model_for_treatment_model: str,\n) -> None:\ndef process(i: int):\n# synthetic data generator\n@@ -206,6 +261,41 @@ def test_offline_estimation_performance(\nn_folds=3, # 3-fold cross-fitting\nrandom_state=12345,\n)\n+ # fit propensity score estimators\n+ classification_model_for_action = PropensityScoreEstimator(\n+ len_list=1,\n+ n_actions=n_actions,\n+ base_model=base_model_dict[base_model_for_treatment_model](\n+ **hyperparams[base_model_for_treatment_model]\n+ ),\n+ calibration_cv=2,\n+ )\n+ estimated_pscore = classification_model_for_action.fit_predict(\n+ action=bandit_feedback_test[\"action\"],\n+ position=bandit_feedback_test[\"position\"],\n+ context=bandit_feedback_test[\"context\"],\n+ n_folds=3,\n+ evaluate_model_performance=True,\n+ random_state=12345,\n+ )\n+ # fit importance weight estimators\n+ estimated_importance_weights_dict = {}\n+ for clf_name, clf_arguments in bipw_model_configurations.items():\n+ clf = ImportanceWeightEstimator(\n+ len_list=1,\n+ n_actions=n_actions,\n+ fitting_method=clf_arguments[\"fitting_method\"],\n+ base_model=clf_arguments[\"base_model\"],\n+ )\n+ estimated_importance_weights_dict[clf_name] = clf.fit_predict(\n+ action=bandit_feedback_test[\"action\"],\n+ context=bandit_feedback_test[\"context\"],\n+ action_dist=action_dist,\n+ position=bandit_feedback_test[\"position\"],\n+ n_folds=2,\n+ evaluate_model_performance=False,\n+ random_state=12345,\n+ )\n# evaluate estimators' performances using relative estimation error (relative-ee)\nope = OffPolicyEvaluation(\nbandit_feedback=bandit_feedback_test,\n@@ -218,6 +308,8 @@ def test_offline_estimation_performance(\n),\naction_dist=action_dist,\nestimated_rewards_by_reg_model=estimated_rewards_by_reg_model,\n+ estimated_pscore=estimated_pscore,\n+ estimated_importance_weights=estimated_importance_weights_dict,\n)\nreturn relative_ee_i\n@@ -250,3 +342,20 @@ def test_offline_estimation_performance(\nassert relative_ee_df_mean[\"naive\"] > relative_ee_df_mean[\"dr-os (lambda=1)\"]\nassert relative_ee_df_mean[\"naive\"] > relative_ee_df_mean[\"dr-os (lambda=100)\"]\nassert relative_ee_df_mean[\"naive\"] > relative_ee_df_mean[\"dr-os (tuning)\"]\n+ # test estimated_pscore and bipw\n+ estimated_pscore_and_bipw_estimators = [\n+ \"cipw (estimated pscore)\",\n+ \"snipw (estimated pscore)\",\n+ \"dr (estimated pscore)\",\n+ \"dr-os (estimated pscore)\",\n+ \"bipw (svc raw)\",\n+ \"bipw (svc sample)\",\n+ \"bipw (random_forest raw)\",\n+ \"bipw (random_forest sample)\",\n+ ]\n+ for estimator_name in estimated_pscore_and_bipw_estimators:\n+ assert (\n+ relative_ee_df_mean[\"naive\"] > relative_ee_df_mean[estimator_name]\n+ ), f\"{estimator_name} is worse than naive estimator\"\n+ # print(estimator_name, 
relative_ee_df_mean[estimator_name])\n+ # print(relative_ee_df_mean[\"naive\"])\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add ope performance test |
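The test added above wires estimated propensity scores and estimated importance weights into obp's OPE meta class. A condensed sketch of that flow, using only classes and arguments that appear in the diff; the synthetic-data settings, base models, the uniformly random evaluation policy, and the use of estimate_policy_values (instead of the test's relative-ee evaluation) are illustrative assumptions, not the test's exact configuration:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

from obp.dataset import SyntheticBanditDataset, logistic_reward_function
from obp.ope import (
    BalancedInverseProbabilityWeighting,
    ImportanceWeightEstimator,
    InverseProbabilityWeighting,
    OffPolicyEvaluation,
    PropensityScoreEstimator,
)

# small synthetic log (sizes and models are arbitrary choices for illustration)
dataset = SyntheticBanditDataset(
    n_actions=10, dim_context=5, reward_function=logistic_reward_function, random_state=12345
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=1000)
# a uniformly random evaluation policy, just to have a valid `action_dist`
action_dist = np.full((1000, 10, 1), 0.1)

# estimate the behavior policy's propensity scores (used by `use_estimated_pscore=True` estimators)
pscore_estimator = PropensityScoreEstimator(
    len_list=1,
    n_actions=10,
    base_model=RandomForestClassifier(n_estimators=30, random_state=12345),
    calibration_cv=2,
)
estimated_pscore = pscore_estimator.fit_predict(
    action=bandit_feedback["action"],
    position=bandit_feedback["position"],
    context=bandit_feedback["context"],
    n_folds=3,
    random_state=12345,
)

# estimate importance weights for the balanced IPW ("bipw") estimator
iw_estimator = ImportanceWeightEstimator(
    len_list=1,
    n_actions=10,
    fitting_method="sample",
    base_model=RandomForestClassifier(n_estimators=30, random_state=12345),
)
estimated_importance_weights = iw_estimator.fit_predict(
    action=bandit_feedback["action"],
    context=bandit_feedback["context"],
    action_dist=action_dist,
    position=bandit_feedback["position"],
    n_folds=2,
    random_state=12345,
)

ope = OffPolicyEvaluation(
    bandit_feedback=bandit_feedback,
    ope_estimators=[
        InverseProbabilityWeighting(estimator_name="ipw (estimated pscore)", use_estimated_pscore=True),
        BalancedInverseProbabilityWeighting(estimator_name="bipw", lambda_=np.inf),
    ],
)
# when a dict is given, its key must match the corresponding estimator_name
estimated_policy_values = ope.estimate_policy_values(
    action_dist=action_dist,
    estimated_pscore=estimated_pscore,
    estimated_importance_weights={"bipw": estimated_importance_weights},
)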
641,014 | 11.01.2022 06:53:32 | 18,000 | e7cac29b95f86d25976b322acadf59fd935aa1b2 | fix the weight of SGIPW/SGDR | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -1830,7 +1830,7 @@ class SubGaussianInverseProbabilityWeighting(InverseProbabilityWeighting):\n\"\"\"\nn_rounds = action.shape[0]\niw = action_dist[np.arange(n_rounds), action, position] / pscore\n- iw_hat = iw / 1 - self.lambda_ + self.lambda_ * iw\n+ iw_hat = iw / (1 - self.lambda_ + self.lambda_ * iw)\nestimated_rewards = iw_hat * reward\nreturn estimated_rewards\n@@ -1899,7 +1899,7 @@ class SubGaussianInverseProbabilityWeighting(InverseProbabilityWeighting):\n# estimate the (high probability) upper bound of the bias of SGIPW\niw = action_dist[np.arange(n_rounds), action, position] / pscore\n- iw_hat = iw / 1 - self.lambda_ + self.lambda_ * iw\n+ iw_hat = iw / (1 - self.lambda_ + self.lambda_ * iw)\nif use_bias_upper_bound:\nbias_term = estimate_high_probability_upper_bound_bias(\nreward=reward,\n@@ -2022,7 +2022,7 @@ class SubGaussianDoublyRobust(DoublyRobust):\n\"\"\"\nn_rounds = action.shape[0]\niw = action_dist[np.arange(n_rounds), action, position] / pscore\n- iw_hat = iw / 1 - self.lambda_ + self.lambda_ * iw\n+ iw_hat = iw / (1 - self.lambda_ + self.lambda_ * iw)\nq_hat_at_position = estimated_rewards_by_reg_model[\nnp.arange(n_rounds), :, position\n]\n@@ -2109,7 +2109,7 @@ class SubGaussianDoublyRobust(DoublyRobust):\n# estimate the (high probability) upper bound of the bias of SGDR\niw = action_dist[np.arange(n_rounds), action, position] / pscore\n- iw_hat = iw / 1 - self.lambda_ + self.lambda_ * iw\n+ iw_hat = iw / (1 - self.lambda_ + self.lambda_ * iw)\nif use_bias_upper_bound:\nbias_term = estimate_high_probability_upper_bound_bias(\nreward=reward,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix the weight of SGIPW/SGDR |
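The parentheses added above matter because Python parses `iw / 1 - self.lambda_ + self.lambda_ * iw` as `(iw / 1) - self.lambda_ + self.lambda_ * iw`, which leaves the weights unbounded in `iw`. A quick numeric check of the corrected sub-Gaussian weight; the values of `iw` and `lambda_` below are made up:

import numpy as np

iw = np.array([0.5, 1.0, 10.0, 100.0])  # vanilla importance weights pi_e / pi_b
lambda_ = 0.1

buggy = iw / 1 - lambda_ + lambda_ * iw    # parsed as (iw / 1) - lambda_ + lambda_ * iw
fixed = iw / (1 - lambda_ + lambda_ * iw)  # intended sub-Gaussian weight

print(buggy)  # [  0.45    1.     10.9   109.9 ]  -> still grows linearly with iw
print(fixed)  # [  0.526   1.      5.263   9.174]  -> bounded above by 1 / lambda_ = 10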
641,006 | 11.01.2022 23:56:20 | -32,400 | ff27363251d9519dbb6a317f9c34f2d0152947f0 | Apply suggestions from code review
apply 2nd review | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/classification_model.py",
"new_path": "obp/ope/classification_model.py",
"diff": "@@ -18,8 +18,7 @@ from ..utils import check_array, sample_action_fast, check_bandit_feedback_input\n@dataclass\nclass ImportanceWeightEstimator(BaseEstimator):\n- \"\"\"Machine learning model to distinguish between the behavior and evaluation policy (:math:`\\\\Pr[C = 1 | x, a]`),\n- where :math:`\\\\Pr[C=1|x,a]` is the probability that the data coming from the evaluation policy given action :math:`a` and :math:`x`.\n+ \"\"\"Machine learning model to estimate the importance weights induced by behavior and evaluation policies leveraging classifier-based density ratio estimation.\nParameters\n------------\n@@ -36,14 +35,14 @@ class ImportanceWeightEstimator(BaseEstimator):\naction_context: array-like, shape (n_actions, dim_action_context), default=None\nContext vector characterizing action (i.e., vector representation of each action).\nIf not given, one-hot encoding of the action variable is used as default.\n- If fitting_method is 'raw', action_context must be None.\n+ If fitting_method is 'raw', one-hot encoding will be used as action_context.\nfitting_method: str, default='sample'\nMethod to fit the classification model.\nMust be one of ['sample', 'raw']. Each method is defined as follows:\n- - sample: actions are sampled by applying Gumbel-softmax trick to action_dist_at_position, and action features are represented by one-hot encoding of the sampled action.\n+ - sample: actions are sampled from behavior and evaluation policies, respectively, and action features are represented by one-hot encoding of the sampled actions.\n- raw: action_dist_at_position are directly encoded as action features.\n- If fitting_method is 'raw', action_context must be None.\n+ If fitting_method is 'raw', one-hot encoding will be used as action_context.\ncalibration_cv: int, default=2\nNumber of folds in the calibration procedure.\n@@ -75,21 +74,6 @@ class ImportanceWeightEstimator(BaseEstimator):\nraise ValueError(\nf\"fitting_method must be either 'sample' or 'raw', but {self.fitting_method} is given\"\n)\n- if self.fitting_method == \"raw\" and self.action_context is not None:\n- check_array(\n- array=self.action_context, name=\"action_context\", expected_dim=2\n- )\n- if (\n- self.action_context.shape\n- != (\n- self.n_actions,\n- self.n_actions,\n- )\n- or not np.allclose(self.action_context, np.eye(self.n_actions))\n- ):\n- raise ValueError(\n- \"If fitting_method == 'raw', action_context must be None or identity matrix of size (n_actions, n_actions).\"\n- )\nif not isinstance(self.base_model, BaseEstimator):\nraise ValueError(\n\"base_model must be BaseEstimator or a child class of BaseEstimator\"\n@@ -108,7 +92,7 @@ class ImportanceWeightEstimator(BaseEstimator):\nself.base_model_list = [\nclone(self.base_model) for _ in np.arange(self.len_list)\n]\n- if self.action_context is None:\n+ if self.action_context is None or self.fitting_method == \"raw\":\nself.action_context = np.eye(self.n_actions, dtype=int)\ndef fit(\n@@ -134,7 +118,7 @@ class ImportanceWeightEstimator(BaseEstimator):\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\n- If None is given, a classification model assumes that there is only one position.\n+ If None is given, a classification model assumes that there is only a single position in a recommendation interface.\nWhen `len_list` > 1, this position argument has to be set.\nrandom_state: int, default=None\n@@ -208,7 +192,7 @@ class 
ImportanceWeightEstimator(BaseEstimator):\nposition: array-like, shape (n_rounds_of_new_data,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\n- If None is given, a classification model assumes that there is only one position.\n+ If None is given, a classification model assumes that there is only a single position in a recommendation interface.\nWhen `len_list` > 1, this position argument has to be set.\nReturns\n@@ -240,12 +224,11 @@ class ImportanceWeightEstimator(BaseEstimator):\nrandom_state: Optional[int] = None,\nevaluate_model_performance: bool = False,\n) -> np.ndarray:\n- \"\"\"Fit the classification model on given logged bandit feedback data and predict the importance weights of the same data.\n+ \"\"\"Fit the classification model on given logged bandit feedback data and predict the importance weights on the same data, possibly using cross-fitting to avoid over-fitting.\nNote\n------\n- When `n_folds` is larger than 1, then the cross-fitting procedure is applied.\n- See the reference for the details about the cross-fitting technique.\n+ When `n_folds` is larger than 1, the cross-fitting procedure is applied.\nParameters\n----------\n@@ -260,7 +243,7 @@ class ImportanceWeightEstimator(BaseEstimator):\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\n- If None is given, a classification model assumes that there is only one position.\n+ If None is given, a classification model assumes that there is only a single position in a recommendation interface.\nWhen `len_list` > 1, this position argument has to be set.\nn_folds: int, default=1\n@@ -274,7 +257,7 @@ class ImportanceWeightEstimator(BaseEstimator):\nevaluate_model_performance: bool, default=False\nWhether the performance of the classification model is evaluated or not.\n- When True is given, the predicted probability of the classification model and the true label of each fold is saved in `self.eval_result[fold]`\n+ If True, the predicted probability of the classification model and the true label of each fold is saved in `self.eval_result[fold]`\nReturns\n-----------\n@@ -390,14 +373,12 @@ class ImportanceWeightEstimator(BaseEstimator):\naction: array-like, shape (n_rounds,)\nAction sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\n- action_context: array-like, shape shape (n_actions, dim_action_context)\n- Context vector characterizing action (i.e., vector representation of each action).\naction_dist_at_position: array-like, shape (n_rounds, n_actions,)\nAction choice probabilities of evaluation policy of each position (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\nsampled_action_at_position: array-like, shape (n_rounds, n_actions,)\n- One-hot encoding of actions sampled by evaluation policy of each position.\n+ Actions sampled by evaluation policy for each data at each position.\n\"\"\"\nbehavior_policy_feature = np.c_[context, self.action_context[action]]\n@@ -434,7 +415,7 @@ class PropensityScoreEstimator(BaseEstimator):\ncalibration_cv: int, default=2\nNumber of folds in the calibration procedure.\n- If calibration_cv <= 1, classification model is not calibrated.\n+ If calibration_cv <= 1, calibration will not be applied.\nReferences\n-----------\n@@ -490,7 +471,7 @@ class PropensityScoreEstimator(BaseEstimator):\nposition: array-like, shape (n_rounds,), default=None\nPosition of 
recommendation interface where action was presented in each round of the given logged bandit data.\n- If None is given, a classification model assumes that there is only one position.\n+ If None is given, a classification model assumes that there is only a single position in a recommendation interface.\nWhen `len_list` > 1, this position argument has to be set.\n\"\"\"\n@@ -534,7 +515,7 @@ class PropensityScoreEstimator(BaseEstimator):\nposition: array-like, shape (n_rounds_of_new_data,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\n- If None is given, a classification model assumes that there is only one position.\n+ If None is given, a classification model assumes that there is only a single position in a recommendation interface.\nWhen `len_list` > 1, this position argument has to be set.\nReturns\n@@ -562,12 +543,11 @@ class PropensityScoreEstimator(BaseEstimator):\nrandom_state: Optional[int] = None,\nevaluate_model_performance: bool = False,\n) -> np.ndarray:\n- \"\"\"Fit the classification model on given logged bandit feedback data and predict the propensity score of the same data.\n+ \"\"\"Fit the classification model on given logged bandit feedback data and predict the propensity score on the same data, possibly using the cross-fitting procedure to avoid over-fitting.\nNote\n------\n- When `n_folds` is larger than 1, then the cross-fitting procedure is applied.\n- See the reference for the details about the cross-fitting technique.\n+ When `n_folds` is larger than 1, the cross-fitting procedure is applied.\nParameters\n----------\n@@ -579,7 +559,7 @@ class PropensityScoreEstimator(BaseEstimator):\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\n- If None is given, a classification model assumes that there is only one position.\n+ If None is given, a classification model assumes that there is only a single position.\nWhen `len_list` > 1, this position argument has to be set.\nn_folds: int, default=1\n@@ -593,7 +573,7 @@ class PropensityScoreEstimator(BaseEstimator):\nevaluate_model_performance: bool, default=False\nWhether the performance of the classification model is evaluated or not.\n- When True is given, the predicted probability of the classification model and the true label of each fold is saved in `self.eval_result[fold]`\n+ If True, the predicted probability of the classification model and the true label of each fold is saved in `self.eval_result[fold]`\nReturns\n-----------\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -1808,11 +1808,11 @@ class DoublyRobustWithShrinkage(DoublyRobust):\n@dataclass\nclass BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\n- \"\"\"Balanced Inverse Probability Weighting (IPW) Estimator.\n+ \"\"\"Balanced Inverse Probability Weighting (B-IPW) Estimator.\nNote\n-------\n- Balanced Inverse Probability Weighting (IPW) estimates the policy value of evaluation policy :math:`\\\\pi_e` by\n+ Balanced Inverse Probability Weighting (B-IPW) estimates the policy value of evaluation policy :math:`\\\\pi_e` by\n.. math::\n@@ -1822,21 +1822,19 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\na behavior policy :math:`\\\\pi_b`.\n:math:`\\\\hat{w}(x,a):=\\\\Pr[C=1|x,a] / \\\\Pr[C=0|x,a]`, where :math:`\\\\Pr[C=1|x,a]` is the probability that the data coming from the evaluation policy given action :math:`a` and :math:`x`.\n:math:`\\\\mathbb{E}_{\\\\mathcal{D}}[\\\\cdot]` is the empirical average over :math:`T` observations in :math:`\\\\mathcal{D}`.\n- When the weight-clipping is applied, a large importance sampling ratio is clipped as :math:`\\\\hat{w_c}(x,a) := \\\\min \\\\{ \\\\lambda, \\\\hat{w}(x,a) \\\\}`\n- where :math:`\\\\lambda (>0)` is a hyperparameter that decides a maximum allowed importance weight.\n+ When the weight-clipping is applied, large importance weights are clipped as :math:`\\\\hat{w_c}(x,a) := \\\\min \\\\{ \\\\lambda, \\\\hat{w}(x,a) \\\\}`\n+ where :math:`\\\\lambda (>0)` is a hyperparameter to define a maximum value allowed for importance weights.\n- Balanced IPW re-weights the rewards by the ratio of the evaluation policy and behavior policy (importance sampling ratio).\n- Balanced IPW can be used even when the behavior policy (or the propensity score of the behavior policy) is not known or the behavior policy is deterministic.\n- When the evaluation policy is stochastic, it is not well known whether the balanced IPW performs well.\n+ B-IPW re-weights the rewards by the importance weights estimated via a supervised classification procedure, and thus can be used even when the behavior policy (or the propensity score of the behavior policy) is not known. `obp.ope.ImportanceWeightEstimator` can be used to estimate the importance weights for B-IPW.\n- Please note that the original estimator of B-IPW is defined as follows (only when the evaluation policy is deterministic):\n+ Note that, in the reference paper, B-IPW is defined as follows (only when the evaluation policy is deterministic):\n.. math::\n- \\\\hat{V}_{\\\\mathrm{B-IPW}} (\\\\pi_e; \\\\mathcal{D}) := \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\pi_e (a_t|x_t) \\\\hat{w}(x_t,a_t) r_t]}{\\\\mathbb{E}_{\\\\mathcal{D}} [\\\\pi_e (a_t|x_t) \\\\hat{w}(x_t,a_t)},\n+ \\\\hat{V}_{\\\\mathrm{B-IPW}} (\\\\pi_e; \\\\mathcal{D}) := \\\\frac{\\\\mathbb{E}_{\\\\mathcal{D}} [ \\\\hat{w}(x_t,\\\\pi_e (x_t)) r_t]}{\\\\mathbb{E}_{\\\\mathcal{D}} [ \\\\hat{w}(x_t,\\\\pi_e (x_t))},\n- where :math:`\\\\pi_e` is the evaluation policy, and our estimator is a bit different from the original estimator.\n+ where :math:`\\\\pi_e` is a deterministic evaluation policy. 
We modify this original definition to adjust to stochastic evaluation policies.\nParameters\n------------\n@@ -1888,7 +1886,7 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nAction sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\nestimated_importance_weights: array-like, shape (n_rounds,)\n- Importance weights estimated via supervised classification implemented in `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\n+ Importance weights estimated via supervised classification using `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n@@ -1932,7 +1930,7 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nAction sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\nestimated_importance_weights: array-like, shape (n_rounds,)\n- Importance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\n+ Importance weights estimated via supervised classification using `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n@@ -1995,7 +1993,7 @@ class BalancedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nAction sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.\nestimated_importance_weights: array-like, shape (n_rounds,)\n- Importance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\n+ Importance weights estimated via supervised classification using `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_t|x_t)`.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -150,7 +150,7 @@ class BaseOffPolicyEstimatorTuning:\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\nWhen None is given, the effect of position on the reward will be ignored.\n- (If only one action is chosen and there is no posion, then you can just ignore this argument.)\n+ (If only a single action is chosen at each round, you can just ignore this argument.)\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n@@ -228,7 +228,7 @@ class BaseOffPolicyEstimatorTuning:\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\nWhen None is given, the effect of position on the reward will be ignored.\n- (If only one action is chosen and there is no posion, then you can just ignore this argument.)\n+ (If only a single action is chosen at each round, you can just ignore this argument.)\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n@@ -348,7 +348,7 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\nWhen None is given, the effect of position on the reward will be ignored.\n- (If only one action is chosen and there is no posion, then you can just ignore this argument.)\n+ (If only a single action is chosen at each round, you can just ignore this argument.)\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n@@ -421,7 +421,7 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\nposition: array-like, shape (n_rounds,), default=None\nPosition of recommendation interface where action was presented in each round of the given logged bandit data.\nWhen None is given, the effect of position on the reward will be ignored.\n- (If only one action is chosen and there is no posion, then you can just ignore this argument.)\n+ (If only a single action is chosen at each round, you can just ignore this argument.)\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "@@ -227,12 +227,12 @@ class OffPolicyEvaluation:\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nestimated_importance_weights: array-like, shape (n_rounds,) or Dict[str, array-like], default=None\nImportance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nReturns\n@@ -290,12 +290,12 @@ class OffPolicyEvaluation:\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nestimated_importance_weights: array-like, shape (n_rounds,) or Dict[str, array-like], default=None\nImportance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nalpha: float, default=0.05\nSignificance level.\n@@ -578,12 +578,12 @@ class OffPolicyEvaluation:\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nestimated_importance_weights: array-like, shape (n_rounds,) or Dict[str, array-like], default=None\nImportance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nmetric: str, default=\"relative-ee\"\nEvaluation metric used to evaluate and compare the estimation performance of OPE estimators.\n@@ -660,12 +660,12 @@ class OffPolicyEvaluation:\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\nWhen an 
array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nestimated_importance_weights: array-like, shape (n_rounds,) or Dict[str, array-like], default=None\nImportance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nmetric: str, default=\"relative-ee\"\nEvaluation metric used to evaluate and compare the estimation performance of OPE estimators.\n@@ -727,12 +727,12 @@ class OffPolicyEvaluation:\nestimated_pscore: array-like, shape (n_rounds,), default=None\nEstimated action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\\\hat{\\\\pi}_b(a_t|x_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nestimated_importance_weights: array-like, shape (n_rounds,) or Dict[str, array-like], default=None\nImportance weights estimated via supervised classification implemented by `obp.ope.ImportanceWeightEstimator`, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\nWhen an array-like is given, all OPE estimators use it.\n- When a dict is given, if the dict has the name of a estimator as a key, the corresponding value is used.\n+ When a dict with an estimator's name as its key is given, the corresponding value is used for the estimator.\nalpha: float, default=0.05\nSignificance level.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -331,7 +331,7 @@ def check_ope_inputs(\nExpected rewards given context, action, and position estimated by regression model, i.e., :math:`\\\\hat{q}(x_t,a_t)`.\nestimated_importance_weights: array-like, shape (n_rounds,), default=None\n- Importance weights estimated via supervised classification, i.e., :math:`\\\\hat{ww}(x_t, a_t)`.\n+ Importance weights estimated via supervised classification, i.e., :math:`\\\\hat{w}(x_t, a_t)`.\n\"\"\"\n# action_dist\ncheck_array(array=action_dist, name=\"action_dist\", expected_dim=3)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -212,9 +212,9 @@ def test_offline_estimation_performance(\nn_rounds: int,\nn_actions: int,\ndim_context: int,\n- base_model_for_evaluation_policy: str,\n+ base_model_for_iw_estimator: str,\nbase_model_for_reg_model: str,\n- base_model_for_treatment_model: str,\n+ base_model_for_pscore_estimator: str,\n) -> None:\ndef process(i: int):\n# synthetic data generator\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | Apply suggestions from code review
apply 2nd review
Co-authored-by: yuta-saito <[email protected]> |
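For reference, the reference-paper form of B-IPW quoted in the revised docstring above (deterministic evaluation policy) is simply a self-normalized weighted mean of the observed rewards. A toy sketch with made-up weights; in practice the weights would come from ImportanceWeightEstimator.fit_predict, and obp's own estimator additionally adjusts this definition for stochastic evaluation policies:

import numpy as np

reward = np.array([1.0, 0.0, 1.0, 1.0, 0.0])
# estimated importance weights w_hat(x_t, a_t) on rounds where a_t = pi_e(x_t)
estimated_importance_weights = np.array([0.8, 1.5, 0.6, 2.0, 1.1])

# V_hat_{B-IPW} = E_D[w_hat * r] / E_D[w_hat]  (a weighted average of rewards)
v_hat_bipw = np.average(reward, weights=estimated_importance_weights)
print(round(v_hat_bipw, 3))  # 0.567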
641,014 | 11.01.2022 12:46:04 | 18,000 | 11631614251eebf80c84e26d1722e44d087543e5 | implement synthetic data generator class for multiple loggers | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -11,6 +11,7 @@ from obp.dataset.synthetic import polynomial_behavior_policy\nfrom obp.dataset.synthetic import polynomial_reward_function\nfrom obp.dataset.synthetic import sparse_reward_function\nfrom obp.dataset.synthetic import SyntheticBanditDataset\n+from obp.dataset.synthetic_multi import SyntheticBanditDatasetWithMultiLoggers\nfrom obp.dataset.synthetic_continuous import linear_behavior_policy_continuous\nfrom obp.dataset.synthetic_continuous import linear_reward_funcion_continuous\nfrom obp.dataset.synthetic_continuous import linear_synthetic_policy_continuous\n@@ -47,4 +48,5 @@ __all__ = [\n\"SyntheticSlateBanditDataset\",\n\"action_interaction_reward_function\",\n\"linear_behavior_policy_logit\",\n+ \"SyntheticBanditDatasetWithMultiLoggers\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n# Licensed under the Apache 2.0 License.\n-\"\"\"Class for Generating Synthetic Logged Bandit Feedback.\"\"\"\n+\"\"\"Class for Generating Synthetic Logged Bandit Data.\"\"\"\nfrom dataclasses import dataclass\nfrom typing import Callable\nfrom typing import Optional\n@@ -23,7 +23,7 @@ from .reward_type import RewardType\n@dataclass\nclass SyntheticBanditDataset(BaseBanditDataset):\n- \"\"\"Class for generating synthetic bandit dataset.\n+ \"\"\"Class for generating synthetic bandit data.\nNote\n-----\n@@ -69,7 +69,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nIf `beta` is large, the behavior policy becomes near-deterministic/near-optimal,\nwhile a small or negative value of `beta` leads to a sub-optimal behavior policy.\n- beta: float, default=1.0\n+ beta: int or float, default=1.0\nInverse temperature parameter, which controls the optimality and entropy of the behavior policy.\nA large value leads to a near-deterministic behavior policy,\nwhile a small value leads to a near-uniform behavior policy.\n@@ -80,7 +80,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nNumber of deficient actions having zero probability of being selected in the logged bandit data.\nIf there are some deficient actions, the full/common support assumption is very likely to be violated,\nleading to some bias for IPW-type estimators. See Sachdeva et al.(2020) for details.\n- `n_deficient_actions` should be an integer smaller than `n_actions - 1` so that there exists at least one actions\n+ `n_deficient_actions` should be an integer smaller than `n_actions - 1` so that there exists at least one action\nthat have a positive probability of being selected by the behavior policy.\nrandom_state: int, default=12345\n@@ -179,16 +179,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\n\"\"\"Initialize Class.\"\"\"\ncheck_scalar(self.n_actions, \"n_actions\", int, min_val=2)\ncheck_scalar(self.dim_context, \"dim_context\", int, min_val=1)\n- if RewardType(self.reward_type) not in [\n- RewardType.BINARY,\n- RewardType.CONTINUOUS,\n- ]:\n- raise ValueError(\n- f\"`reward_type` must be either '{RewardType.BINARY.value}' or '{RewardType.CONTINUOUS.value}',\"\n- f\"but {self.reward_type} is given.'\"\n- )\ncheck_scalar(self.beta, \"beta\", (int, float))\n- check_scalar(self.reward_std, \"reward_std\", (int, float), min_val=0)\ncheck_scalar(\nself.n_deficient_actions,\n\"n_deficient_actions\",\n@@ -196,15 +187,27 @@ class SyntheticBanditDataset(BaseBanditDataset):\nmin_val=0,\nmax_val=self.n_actions - 1,\n)\n+\nif self.random_state is None:\nraise ValueError(\"`random_state` must be given\")\nself.random_ = check_random_state(self.random_state)\n+\n+ if RewardType(self.reward_type) not in [\n+ RewardType.BINARY,\n+ RewardType.CONTINUOUS,\n+ ]:\n+ raise ValueError(\n+ f\"`reward_type` must be either '{RewardType.BINARY.value}' or '{RewardType.CONTINUOUS.value}',\"\n+ f\"but {self.reward_type} is given.'\"\n+ )\n+ check_scalar(self.reward_std, \"reward_std\", (int, float), min_val=0)\nif self.reward_function is None:\nself.expected_reward = self.sample_contextfree_expected_reward()\nif RewardType(self.reward_type) == RewardType.CONTINUOUS:\nself.reward_min = 0\nself.reward_max = 1e10\n- # one-hot encoding representations characterizing actions.\n+\n+ # one-hot encoding characterizing actions.\nif self.action_context is None:\nself.action_context = np.eye(self.n_actions, dtype=int)\nelse:\n@@ -213,7 +216,7 @@ class 
SyntheticBanditDataset(BaseBanditDataset):\n)\nif self.action_context.shape[0] != self.n_actions:\nraise ValueError(\n- \"Expected `action_context.shape[0] == n_actions`, but found it False.'\"\n+ \"Expected `action_context.shape[0] == n_actions`, but found it False.\"\n)\n@property\n@@ -305,7 +308,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\nReturns\n---------\nbandit_feedback: BanditFeedback\n- Generated synthetic logged bandit dataset.\n+ Synthesized logged bandit data.\n\"\"\"\ncheck_scalar(n_rounds, \"n_rounds\", int, min_val=1)\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -287,7 +287,7 @@ class SyntheticSlateBanditDataset(BaseBanditDataset):\nself.reward_min = 0\nself.reward_max = 1e10\nself.reward_std = 1.0\n- # one-hot encoding representations characterizing each action\n+ # one-hot encoding characterizing each action\nself.action_context = np.eye(self.n_unique_action, dtype=int)\ndef obtain_standard_decay_action_interaction_weight_matrix(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement synthetic data generator class for multiple loggers |
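The docstring edits above describe `beta` (optimality and entropy of the behavior policy) and `n_deficient_actions` (actions with zero logging probability). A short usage sketch of those two knobs based on the parameters documented in the diff; the concrete values are illustrative:

from obp.dataset import SyntheticBanditDataset, logistic_reward_function

dataset = SyntheticBanditDataset(
    n_actions=10,
    dim_context=5,
    reward_type="binary",
    reward_function=logistic_reward_function,
    beta=-3,                # negative beta -> sub-optimal behavior policy
    n_deficient_actions=2,  # two actions get zero logging probability (support deficiency)
    random_state=12345,
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
# deficient actions never show up in the log, which is exactly the setting where
# IPW-type estimators become biased (Sachdeva et al. 2020, cited in the docstring).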
641,006 | 12.01.2022 02:47:32 | -32,400 | 5ef06b30a43a78931713673d95626e84a290fbb3 | remove action_context testing when fitting method is raw | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_importance_weight_estimator.py",
"new_path": "tests/ope/test_importance_weight_estimator.py",
"diff": "@@ -95,16 +95,6 @@ invalid_input_of_initializing_importance_weight_estimator = [\nValueError,\n\"fitting_method must be either 'sample' or 'raw', but awesome is given\",\n),\n- (\n- np.random.uniform(size=(n_actions, 8)), #\n- n_actions,\n- len_list,\n- \"raw\", #\n- RandomForestClassifier(**hyperparams[\"random_forest\"]),\n- 2,\n- ValueError,\n- \"If fitting_method == 'raw', action_context must be None or identity matrix\",\n- ),\n(\nnp.random.uniform(size=(n_actions, 8)),\nn_actions,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | remove action_context testing when fitting method is raw |
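This removal matches the earlier docstring change ("If fitting_method is 'raw', one-hot encoding will be used as action_context"): a non-identity `action_context` combined with `fitting_method="raw"` is no longer rejected at construction time, it is simply overridden internally. A hedged sketch; the base model and array shapes are illustrative:

import numpy as np
from sklearn.linear_model import LogisticRegression

from obp.ope import ImportanceWeightEstimator

iw_estimator = ImportanceWeightEstimator(
    n_actions=4,
    len_list=1,
    fitting_method="raw",
    base_model=LogisticRegression(),
    # previously raised ValueError; now replaced by one-hot encoding internally
    action_context=np.random.uniform(size=(4, 8)),
)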
641,014 | 21.01.2022 22:15:19 | 18,000 | 18e4f7523cfc75704a7f172516c717bee342eb60 | add weighted dr | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/__init__.py",
"new_path": "obp/ope/__init__.py",
"diff": "@@ -27,6 +27,7 @@ from obp.ope.estimators_multi import MultiLoggersBalancedDoublyRobust\nfrom obp.ope.estimators_multi import MultiLoggersBalancedInverseProbabilityWeighting\nfrom obp.ope.estimators_multi import MultiLoggersNaiveDoublyRobust\nfrom obp.ope.estimators_multi import MultiLoggersNaiveInverseProbabilityWeighting\n+from obp.ope.estimators_multi import MultiLoggersWeightedDoublyRobust\nfrom obp.ope.estimators_multi import MultiLoggersWeightedInverseProbabilityWeighting\nfrom obp.ope.estimators_slate import SelfNormalizedSlateIndependentIPS\nfrom obp.ope.estimators_slate import SelfNormalizedSlateRewardInteractionIPS\n@@ -73,6 +74,7 @@ __all__ = [\n\"MultiLoggersBalancedInverseProbabilityWeighting\",\n\"MultiLoggersNaiveDoublyRobust\",\n\"MultiLoggersBalancedDoublyRobust\",\n+ \"MultiLoggersWeightedDoublyRobust\",\n\"OffPolicyEvaluation\",\n\"SlateOffPolicyEvaluation\",\n\"ContinuousOffPolicyEvaluation\",\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_multi.py",
"new_path": "obp/ope/estimators_multi.py",
"diff": "@@ -702,25 +702,25 @@ class MultiLoggersWeightedInverseProbabilityWeighting(\nfor k in unique_stratum_idx:\nidx_ = stratum_idx == k\nvar_k[k] = np.var(reward[idx_] * iw[idx_])\n- inv_weight_k = var_k * np.sum(n_data_strata / var_k)\n+ weight_k = n_data_strata / (var_k * np.sum(n_data_strata / var_k))\n- return reward * iw * (1.0 / inv_weight_k[stratum_idx])\n+ return reward * iw * weight_k[stratum_idx]\n@dataclass\nclass MultiLoggersNaiveDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\n- \"\"\"Multi-Loggers Naive Doubly Robust (Multi-DR) Estimator.\n+ \"\"\"Multi-Loggers Naive Doubly Robust (Multi-Naive-DR) Estimator.\nNote\n-------\nThis estimator is called Average DR in Kallus et al.(2021).\n- Multi-DR estimates the policy value of evaluation policy :math:`\\\\pi_e`\n+ Multi-Naive-DR estimates the policy value of evaluation policy :math:`\\\\pi_e`\nusing logged data collected by multiple logging/behavior policies as\n.. math::\n- \\\\hat{V}_{\\\\mathrm{Multi-DR}} (\\\\pi_e; \\\\mathcal{D}, \\\\hat{q})\n+ \\\\hat{V}_{\\\\mathrm{Multi-Naive-DR}} (\\\\pi_e; \\\\mathcal{D}, \\\\hat{q})\n:= \\\\sum_{k=1}^K \\\\rho_k \\\\mathbb{E}_{n_k} [\\\\hat{q}(x_i,\\\\pi_e) + w_k(x_i,a_i) (r_i - \\\\hat{q}(x_i,a_i))],\nwhere :math:`\\\\mathcal{D}_k=\\\\{(x_i,a_i,r_i)\\\\}_{i=1}^{n_k}` is logged bandit data with :math:`n_k` observations collected by\n@@ -732,7 +732,7 @@ class MultiLoggersNaiveDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\n:math:`\\\\hat{q} (x_i,\\\\pi):= \\\\mathbb{E}_{a \\\\sim \\\\pi(a|x)}[\\\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\\\pi`.\nWhen the clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}_k(x,a) := \\\\min \\\\{ \\\\lambda, w_k(x,a) \\\\}`, where :math:`\\\\lambda (>0)` is a hyperparameter to specify a maximum allowed importance weight.\n- Multi-DR applies the standard DR to each stratum and takes the weighted average of the K datasets.\n+ Multi-Naive-DR applies the standard DR to each stratum and takes the weighted average of the K datasets.\nParameters\n------------\n@@ -743,7 +743,7 @@ class MultiLoggersNaiveDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\nuse_estimated_pscore: bool, default=False.\nIf True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.\n- estimator_name: str, default='multi_ipw'.\n+ estimator_name: str, default='multi_dr'.\nName of the estimator.\nReferences\n@@ -820,7 +820,6 @@ class MultiLoggersNaiveDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\nEstimated rewards for each observation.\n\"\"\"\n- n_rounds = action.shape[0]\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\nrho_k = np.unique(stratum_idx, return_counts=True)[1] / stratum_idx.shape[0]\n@@ -828,13 +827,11 @@ class MultiLoggersNaiveDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\n# weight clipping\nif isinstance(iw, np.ndarray):\niw = np.minimum(iw, self.lambda_)\n- q_hat_at_position = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), :, position\n- ]\n- q_hat_factual = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), action, position\n- ]\n- pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+\n+ n = action.shape[0]\n+ q_hat_at_position = estimated_rewards_by_reg_model[np.arange(n), :, position]\n+ q_hat_factual = estimated_rewards_by_reg_model[np.arange(n), action, position]\n+ pi_e_at_position = action_dist[np.arange(n), :, position]\nestimated_rewards = 
np.average(\nq_hat_at_position,\nweights=pi_e_at_position,\n@@ -1147,20 +1144,17 @@ class MultiLoggersBalancedDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\nEstimated rewards for each observation.\n\"\"\"\n- n_rounds = action.shape[0]\nif position is None:\nposition = np.zeros(action_dist.shape[0], dtype=int)\niw_avg = action_dist[np.arange(action.shape[0]), action, position] / pscore_avg\n# weight clipping\nif isinstance(iw_avg, np.ndarray):\niw_avg = np.minimum(iw_avg, self.lambda_)\n- q_hat_at_position = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), :, position\n- ]\n- q_hat_factual = estimated_rewards_by_reg_model[\n- np.arange(n_rounds), action, position\n- ]\n- pi_e_at_position = action_dist[np.arange(n_rounds), :, position]\n+\n+ n = action.shape[0]\n+ q_hat_at_position = estimated_rewards_by_reg_model[np.arange(n), :, position]\n+ q_hat_factual = estimated_rewards_by_reg_model[np.arange(n), action, position]\n+ pi_e_at_position = action_dist[np.arange(n), :, position]\nestimated_rewards = np.average(\nq_hat_at_position,\nweights=pi_e_at_position,\n@@ -1351,3 +1345,132 @@ class MultiLoggersBalancedDoublyRobust(BaseMultiLoggersOffPolicyEstimator):\nn_bootstrap_samples=n_bootstrap_samples,\nrandom_state=random_state,\n)\n+\n+\n+@dataclass\n+class MultiLoggersWeightedDoublyRobust(MultiLoggersNaiveDoublyRobust):\n+ \"\"\"Multi-Loggers Weighted Doubly Robust (Multi-Weighted-DR) Estimator.\n+\n+ Note\n+ -------\n+ This estimator is called Precision Weighted DR in Kallus et al.(2021).\n+\n+ Multi-Weighted-DR estimates the policy value of evaluation policy :math:`\\\\pi_e`\n+ using logged data collected by multiple logging/behavior policies as\n+\n+ .. math::\n+\n+ \\\\hat{V}_{\\\\mathrm{Multi-Weighted-DR}} (\\\\pi_e; \\\\mathcal{D}, \\\\hat{q})\n+ := \\\\sum_{k=1}^K M^{*}_k \\\\mathbb{E}_{n_k} [\\\\hat{q}(x_i,\\\\pi_e) + w_k(x_i,a_i) (r_i - \\\\hat{q}(x_i,a_i))],\n+\n+ where :math:`\\\\mathcal{D}_k=\\\\{(x_i,a_i,r_i)\\\\}_{i=1}^{n_k}` is logged bandit data with :math:`n_k` observations collected by\n+ the k-th behavior policy :math:`\\\\pi_k`. 
:math:`w_k(x,a):=\\\\pi_e (a|x)/\\\\pi_k (a|x)` is the importance weight given :math:`x` and :math:`a` computed for the k-th behavior policy.\n+ We can represent the whole logged bandit data as :math:`\\\\mathcal{D}_k=\\\\{(k_i,x_i,a_i,r_i)\\\\}_{i=1}^{n}` where :math:`k_i` is the index to indicate the logging/behavior policy that generates i-th data, i.e., :math:`\\\\pi_{k_i}`.\n+ Note that :math:`n := \\\\sum_{k=1}^K n_k` is the total number of logged bandit data, and :math:`\\\\rho_k := n_k / n` is the dataset proportions.\n+ :math:`\\\\mathbb{E}_{n}[\\\\cdot]` is the empirical average over :math:`n` observations in :math:`\\\\mathcal{D}`.\n+ :math:`\\\\hat{q} (x,a)` is the estimated expected reward given :math:`x` and :math:`a`.\n+ :math:`\\\\hat{q} (x_i,\\\\pi):= \\\\mathbb{E}_{a \\\\sim \\\\pi(a|x)}[\\\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\\\pi`.\n+ When the clipping is applied, a large importance weight is clipped as :math:`\\\\hat{w}_k(x,a) := \\\\min \\\\{ \\\\lambda, w_k(x,a) \\\\}`, where :math:`\\\\lambda (>0)` is a hyperparameter to specify a maximum allowed importance weight.\n+\n+ Multi-Weighted-DR prioritizes the strata generated by the logging/behavior policies similar to the evaluation policy.\n+ The weight for the k-th logging/behavior policy :math:`M^*_k` is defined based on\n+ the divergence between the evaluation policy :math:`\\\\pi_e` and :math:`\\\\pi_k`.\n+\n+ Parameters\n+ ------------\n+ lambda_: float, default=np.inf\n+ A maximum possible value of the importance weight.\n+ When a positive finite value is given, importance weights larger than `lambda_` will be clipped.\n+\n+ use_estimated_pscore: bool, default=False.\n+ If True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.\n+\n+ estimator_name: str, default='multi_weighted_dr'.\n+ Name of the estimator.\n+\n+ References\n+ ------------\n+ Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.\n+ \"Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.\", 2018.\n+\n+ Nathan Kallus, Yuta Saito, and Masatoshi Uehara.\n+ \"Optimal Off-Policy Evaluation from Multiple Logging Policies.\", 2021.\n+\n+ \"\"\"\n+\n+ estimator_name: str = \"multi_weighted_dr\"\n+\n+ def _estimate_round_rewards(\n+ self,\n+ reward: np.ndarray,\n+ action: np.ndarray,\n+ pscore: np.ndarray,\n+ stratum_idx: np.ndarray,\n+ action_dist: np.ndarray,\n+ estimated_rewards_by_reg_model: np.ndarray,\n+ position: Optional[np.ndarray] = None,\n+ **kwargs,\n+ ) -> np.ndarray:\n+ \"\"\"Estimate round-wise (or sample-wise) rewards.\n+\n+ Parameters\n+ ----------\n+ reward: array-like, shape (n_rounds,)\n+ Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.\n+\n+ action: array-like, shape (n_rounds,)\n+ Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.\n+\n+ pscore: array-like, shape (n_rounds,)\n+ Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\\\pi_k(a_i|x_i)`.\n+ If `use_estimated_pscore` is False, `pscore` must be given.\n+\n+ stratum_idx: array-like, shape (n_rounds,)\n+ Indices to differentiate the logging/behavior policy that generates each data, i.e., :math:`k`.\n+\n+ action_dist: array-like, shape (n_rounds, n_actions, len_list)\n+ Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_i|x_i)`.\n+\n+ estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, 
len_list)\n+ Estimated expected rewards given context, action, and position, i.e., :math:`\\\\hat{q}(x_i,a_i)`.\n+\n+ position: array-like, shape (n_rounds,), default=None\n+ Indices to differentiate positions in a recommendation interface where the actions are presented.\n+ If None, the effect of position on the reward will be ignored.\n+ (If only a single action is chosen for each data, you can just ignore this argument.)\n+\n+ Returns\n+ ----------\n+ estimated_rewards: array-like, shape (n_rounds,)\n+ Estimated rewards for each observation.\n+\n+ \"\"\"\n+ if position is None:\n+ position = np.zeros(action_dist.shape[0], dtype=int)\n+ iw = action_dist[np.arange(action.shape[0]), action, position] / pscore\n+ # weight clipping\n+ if isinstance(iw, np.ndarray):\n+ iw = np.minimum(iw, self.lambda_)\n+\n+ n = action.shape[0]\n+ q_hat_at_position = estimated_rewards_by_reg_model[np.arange(n), :, position]\n+ q_hat_factual = estimated_rewards_by_reg_model[np.arange(n), action, position]\n+ pi_e_at_position = action_dist[np.arange(n), :, position]\n+ estimated_rewards = np.average(\n+ q_hat_at_position,\n+ weights=pi_e_at_position,\n+ axis=1,\n+ )\n+\n+ unique_stratum_idx, n_data_strata = np.unique(stratum_idx, return_counts=True)\n+ var_k = np.zeros(unique_stratum_idx.shape[0])\n+ for k in unique_stratum_idx:\n+ idx_ = stratum_idx == k\n+ var_k[k] = np.var(\n+ estimated_rewards[idx_]\n+ + iw[idx_] * (reward[idx_] - q_hat_factual[idx_])\n+ )\n+ weight_k = n_data_strata / (var_k * np.sum(n_data_strata / var_k))\n+ estimated_rewards += iw * (reward - q_hat_factual) * weight_k[stratum_idx]\n+\n+ return estimated_rewards\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add weighted dr |
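Both MultiLoggersWeightedInverseProbabilityWeighting (fixed earlier in this series) and the new MultiLoggersWeightedDoublyRobust use the same precision (inverse-variance) weighting over strata, `weight_k = n_k / (var_k * sum_j n_j / var_j)`. Reproducing that line on toy numbers shows the intended behavior; the values below are made up:

import numpy as np

n_data_strata = np.array([1000, 3000, 6000])  # n_k: data collected by each logging policy
var_k = np.array([2.0, 0.5, 1.0])             # per-stratum variance of the round-wise estimates

weight_k = n_data_strata / (var_k * np.sum(n_data_strata / var_k))
print(weight_k)        # [0.04 0.48 0.48] -> low-variance / large strata dominate
print(weight_k.sum())  # 1.0 -> the weights form a convex combination over strata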
641,014 | 21.01.2022 22:31:41 | 18,000 | f51b4698dcd14acdeed6f8b324a02f9b0827bce0 | rename synthetic multi loggers dataset class | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -18,7 +18,7 @@ from obp.dataset.synthetic_continuous import quadratic_reward_funcion_continuous\nfrom obp.dataset.synthetic_continuous import sign_synthetic_policy_continuous\nfrom obp.dataset.synthetic_continuous import SyntheticContinuousBanditDataset\nfrom obp.dataset.synthetic_continuous import threshold_synthetic_policy_continuous\n-from obp.dataset.synthetic_multi import SyntheticBanditDatasetWithMultiLoggers\n+from obp.dataset.synthetic_multi import SyntheticMultiLoggersBanditDataset\nfrom obp.dataset.synthetic_slate import action_interaction_reward_function\nfrom obp.dataset.synthetic_slate import linear_behavior_policy_logit\nfrom obp.dataset.synthetic_slate import SyntheticSlateBanditDataset\n@@ -48,5 +48,5 @@ __all__ = [\n\"SyntheticSlateBanditDataset\",\n\"action_interaction_reward_function\",\n\"linear_behavior_policy_logit\",\n- \"SyntheticBanditDatasetWithMultiLoggers\",\n+ \"SyntheticMultiLoggersBanditDataset\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_multi.py",
"new_path": "obp/dataset/synthetic_multi.py",
"diff": "@@ -20,7 +20,7 @@ from .synthetic import SyntheticBanditDataset\n@dataclass\n-class SyntheticBanditDatasetWithMultiLoggers(SyntheticBanditDataset):\n+class SyntheticMultiLoggersBanditDataset(SyntheticBanditDataset):\n\"\"\"Class for synthesizing bandit data with multiple logging/behavior policies.\nNote\n@@ -97,7 +97,7 @@ class SyntheticBanditDatasetWithMultiLoggers(SyntheticBanditDataset):\n.. code-block:: python\n>>> from obp.dataset import (\n- SyntheticBanditDatasetWithMultiLoggers,\n+ SyntheticMultiLoggersBanditDataset,\nlogistic_reward_function\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_multi.py",
"new_path": "tests/dataset/test_synthetic_multi.py",
"diff": "import numpy as np\nimport pytest\n-from obp.dataset import SyntheticBanditDatasetWithMultiLoggers\n+from obp.dataset import SyntheticMultiLoggersBanditDataset\n# n_actions, dim_context, reward_type, reward_std, betas, rhos, n_deficient_actions, action_context, random_state, err, description\n@@ -326,7 +326,7 @@ def test_synthetic_multi_init_using_invalid_inputs(\ndescription,\n):\nwith pytest.raises(err, match=f\"{description}*\"):\n- _ = SyntheticBanditDatasetWithMultiLoggers(\n+ _ = SyntheticMultiLoggersBanditDataset(\nn_actions=n_actions,\ndim_context=dim_context,\nreward_type=reward_type,\n@@ -344,13 +344,13 @@ def test_synthetic_obtain_batch_bandit_feedback():\nrhos = [1, 1, 1]\n# n_rounds\nwith pytest.raises(ValueError):\n- dataset = SyntheticBanditDatasetWithMultiLoggers(\n+ dataset = SyntheticMultiLoggersBanditDataset(\nn_actions=2, betas=betas, rhos=rhos\n)\ndataset.obtain_batch_bandit_feedback(n_rounds=0)\nwith pytest.raises(TypeError):\n- dataset = SyntheticBanditDatasetWithMultiLoggers(\n+ dataset = SyntheticMultiLoggersBanditDataset(\nn_actions=2, betas=betas, rhos=rhos\n)\ndataset.obtain_batch_bandit_feedback(n_rounds=\"3\")\n@@ -359,7 +359,7 @@ def test_synthetic_obtain_batch_bandit_feedback():\nn_rounds = 10\nn_actions = 5\nfor n_deficient_actions in [0, 2]:\n- dataset = SyntheticBanditDatasetWithMultiLoggers(\n+ dataset = SyntheticMultiLoggersBanditDataset(\nn_actions=n_actions,\nbetas=betas,\nrhos=rhos,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -10,7 +10,7 @@ from sklearn.utils import check_random_state\nfrom obp.dataset import linear_behavior_policy\nfrom obp.dataset import logistic_reward_function\nfrom obp.dataset import SyntheticBanditDataset\n-from obp.dataset import SyntheticBanditDatasetWithMultiLoggers\n+from obp.dataset import SyntheticMultiLoggersBanditDataset\nfrom obp.dataset import SyntheticContinuousBanditDataset\nfrom obp.dataset import SyntheticSlateBanditDataset\nfrom obp.policy import Random\n@@ -90,7 +90,7 @@ def synthetic_multi_bandit_feedback() -> BanditFeedback:\nrhos = [1, 2, 3, 2, 1]\nrandom_state = 12345\nn_rounds = 10000\n- dataset = SyntheticBanditDatasetWithMultiLoggers(\n+ dataset = SyntheticMultiLoggersBanditDataset(\nn_actions=n_actions,\ndim_context=dim_context,\nbetas=betas,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | rename synthetic multi loggers dataset class |
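Usage sketch of the renamed class, modeled on the test fixtures in the diff above (`betas`/`rhos` give one inverse temperature and one relative data share per logging policy). The concrete values and the choice of logistic_reward_function are illustrative assumptions, and the exact key under which the logger index is returned is not shown in this diff:

from obp.dataset import SyntheticMultiLoggersBanditDataset, logistic_reward_function

dataset = SyntheticMultiLoggersBanditDataset(
    n_actions=10,
    dim_context=5,
    reward_function=logistic_reward_function,
    betas=[-3, 0, 3],  # one inverse-temperature per logging/behavior policy
    rhos=[1, 2, 1],    # relative data proportions of the loggers
    random_state=12345,
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
# besides the usual keys ("context", "action", "reward", "pscore", ...), the feedback
# identifies which logger generated each round; the Multi-Loggers estimators consume
# that index as `stratum_idx`.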
641,014 | 13.02.2022 12:13:51 | 18,000 | ef167582fbee65723faa00f1b7e443f25988847f | implement SyntheticBanditDatasetWithActionEmbeds | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/__init__.py",
"new_path": "obp/dataset/__init__.py",
"diff": "@@ -18,6 +18,7 @@ from obp.dataset.synthetic_continuous import quadratic_reward_funcion_continuous\nfrom obp.dataset.synthetic_continuous import sign_synthetic_policy_continuous\nfrom obp.dataset.synthetic_continuous import SyntheticContinuousBanditDataset\nfrom obp.dataset.synthetic_continuous import threshold_synthetic_policy_continuous\n+from obp.dataset.synthetic_embed import SyntheticBanditDatasetWithActionEmbeds\nfrom obp.dataset.synthetic_multi import SyntheticBanditDatasetWithMultiLoggers\nfrom obp.dataset.synthetic_slate import action_interaction_reward_function\nfrom obp.dataset.synthetic_slate import linear_behavior_policy_logit\n@@ -49,4 +50,5 @@ __all__ = [\n\"action_interaction_reward_function\",\n\"linear_behavior_policy_logit\",\n\"SyntheticBanditDatasetWithMultiLoggers\",\n+ \"SyntheticBanditDatasetWithActionEmbeds\",\n]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "obp/dataset/synthetic_embed.py",
"diff": "+# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.\n+# Licensed under the Apache 2.0 License.\n+\n+\"\"\"Class for Generating Synthetic Logged Bandit Data with Action Embeddings.\"\"\"\n+from dataclasses import dataclass\n+from typing import Callable\n+from typing import Optional\n+\n+import numpy as np\n+from sklearn.utils import check_scalar\n+\n+from ..types import BanditFeedback\n+from ..utils import sample_action_fast\n+from ..utils import softmax\n+from .reward_type import RewardType\n+from .synthetic import SyntheticBanditDataset\n+\n+\n+@dataclass\n+class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\n+ \"\"\"Class for synthesizing bandit data with action/item category information or embeddings.\n+\n+ Note\n+ -----\n+ By calling the `obtain_batch_bandit_feedback` method several times,\n+ we can resample logged bandit data from the same data generating distribution.\n+ This can be used to estimate confidence intervals of the performances of OPE estimators.\n+\n+ If `behavior_policy_function`=None, the behavior policy will be generated from the true expected reward function.\n+ See the description of the `beta` argument, which controls the optimality and entropy of the behavior policy.\n+\n+\n+ Parameters\n+ -----------\n+ n_actions: int\n+ Number of actions.\n+\n+ dim_context: int, default=1\n+ Number of dimensions of context vectors.\n+\n+ reward_type: str, default='binary'\n+ Whether the rewards are 'binary' or 'continuous'.\n+ When 'binary', rewards are sampled from the Bernoulli distribution.\n+ When 'continuous', rewards are sampled from the truncated Normal distribution with `scale=1`.\n+ The mean parameter of the reward distribution is determined by the `reward_function` specified by the next argument.\n+\n+ reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=None\n+ Function defining the expected reward for each given action-context pair,\n+ i.e., :math:`q: \\\\mathcal{X} \\\\times \\\\mathcal{A} \\\\rightarrow \\\\mathbb{R}`.\n+ If None, context **independent** expected rewards will be\n+ sampled from the uniform distribution automatically.\n+\n+ reward_std: float, default=1.0\n+ Standard deviation of the reward distribution.\n+ A larger value leads to a noisier reward distribution.\n+ This argument is valid only when `reward_type=\"continuous\"`.\n+\n+ behavior_policy_function: Callable[[np.ndarray, np.ndarray], np.ndarray], default=None\n+ Function generating logit values, which will be used to define the behavior policy via softmax transformation.\n+ If None, behavior policy will be generated by applying the softmax function to the expected reward.\n+ Thus, in this case, it is possible to control the optimality of the behavior policy by customizing `beta`.\n+ If `beta` is large, the behavior policy becomes near-deterministic/near-optimal,\n+ while a small or negative value of `beta` leads to a sub-optimal behavior policy.\n+\n+ beta: int or float, default=1.0\n+ Inverse temperature parameter, which controls the optimality and entropy of the behavior policy.\n+ A large value leads to a near-deterministic behavior policy,\n+ while a small value leads to a near-uniform behavior policy.\n+ A positive value leads to a near-optimal behavior policy,\n+ while a negative value leads to a sub-optimal behavior policy.\n+\n+ n_cat_per_dim: int, default=10\n+ Number of categories (cardinality) per category dimension.\n+\n+ dim_latent_param_mat: int, default=5\n+ Number of dimensions of the 
latent parameter matrix to define the expected rewards.\n+ We assume that each category has a corresponding latent parameter representation, which\n+ affects the expected reward of the category. This parameter matrix is unobserved to the estimators.\n+\n+ n_cat_dim: int, default=3\n+ Number of action/item category dimensions.\n+\n+ n_unobserved_cat_dim: int, default=0\n+ Number of unobserved category dimensions.\n+ When there are some unobserved dimensions, the marginalized IPW estimator should have a larger bias.\n+\n+ n_irrelevant_cat_dim: int, default=0\n+ Number of category dimensions irrelevant to the expected rewards.\n+ Discarding irrelevant category dimensions does not increase the bias, while decreasing the variance,\n+ possibly leading to a better MSE of the resulting estimators.\n+\n+ n_deficient_actions: int, default=0\n+ Number of deficient actions having zero probability of being selected in the logged bandit data.\n+ If there are some deficient actions, the full/common support assumption is very likely to be violated,\n+ leading to some bias for IPW-type estimators. See Sachdeva et al.(2020) for details.\n+ `n_deficient_actions` should be an integer smaller than `n_actions - 1` so that there exists at least one action\n+ that have a positive probability of being selected by the behavior policy.\n+\n+ random_state: int, default=12345\n+ Controls the random seed in sampling synthetic bandit data.\n+\n+ dataset_name: str, default='synthetic_bandit_dataset_with_action_category'\n+ Name of the dataset.\n+\n+ Examples\n+ ----------\n+\n+ .. code-block:: python\n+\n+ >>> from obp.dataset import (\n+ SyntheticBanditDatasetWithActionEmbeds,\n+ logistic_reward_function\n+ )\n+\n+ # generate synthetic contextual bandit feedback with 10 actions and 3 dimensional action embeddings.\n+ >>> dataset = SyntheticBanditDatasetWithActionEmbeds(\n+ n_actions=10,\n+ dim_context=5,\n+ reward_function=logistic_reward_function,\n+ beta=5,\n+ n_cat_per_dim=10,\n+ n_cat_dim=3,\n+ random_state=12345,\n+ )\n+ >>> bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=100000)\n+ >>> bandit_feedback\n+\n+ {\n+ 'n_rounds': 10000,\n+ 'n_actions': 10,\n+ 'action_context': array([[8, 6, 3],\n+ [5, 2, 5],\n+ [3, 6, 2],\n+ [3, 2, 5],\n+ [1, 6, 7],\n+ [8, 2, 8],\n+ [4, 1, 0],\n+ [0, 4, 8],\n+ [8, 1, 8],\n+ [1, 0, 2]]),\n+ 'action_embed': array([[1, 3, 5],\n+ [3, 0, 8],\n+ [0, 3, 5],\n+ ...,\n+ [2, 6, 0],\n+ [2, 9, 9],\n+ [8, 3, 5]]),\n+ 'context': array([[ 0.75820197, -0.5155835 , -0.59120232, 0.89674578, -0.97143752],\n+ [ 1.84080991, 0.15388123, -0.27408394, -1.78492569, 0.98100669],\n+ [-0.87371714, -1.01563442, -0.41124354, 1.46562117, -1.00621906],\n+ ...,\n+ [ 1.62370913, 0.34897048, -0.54162779, 0.80960508, 0.62319086],\n+ [-0.65634921, -0.72257087, -0.49909509, 0.34077923, 0.16772229],\n+ ...,\n+ [0.09797275],\n+ [0.0740289 ],\n+ [0.11308123]]]),\n+ 'pscore': array([0.1194293 , 0.22462219, 0.11744424, ..., 0.0833922 , 0.09694612,\n+ 0.09797275])\n+ }\n+\n+\n+ References\n+ ------------\n+ Yuta Saito and Thorsten Joachims.\n+ \"Off-Policy Evaluation for Large Action Spaces via Embeddings.\" 2022.\n+\n+ Noveen Sachdeva, Yi Su, and Thorsten Joachims.\n+ \"Off-policy Bandits with Deficient Support.\" 2020.\n+\n+ \"\"\"\n+\n+ n_actions: int\n+ dim_context: int = 1\n+ reward_type: str = RewardType.BINARY.value\n+ reward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\n+ reward_std: float = 1.0\n+ behavior_policy_function: Optional[\n+ Callable[[np.ndarray, np.ndarray], 
np.ndarray]\n+ ] = None\n+ beta: float = 0.0\n+ n_cat_per_dim: int = 10\n+ dim_latent_param_mat: int = 5\n+ n_cat_dim: int = 3\n+ n_unobserved_cat_dim: int = 0\n+ n_irrelevant_cat_dim: int = 0\n+ n_deficient_actions: int = 0\n+ random_state: int = 12345\n+ dataset_name: str = \"synthetic_bandit_dataset_with_action_embed\"\n+\n+ def __post_init__(self) -> None:\n+ \"\"\"Initialize Class.\"\"\"\n+ super().__post_init__()\n+ check_scalar(self.n_cat_per_dim, \"n_cat_per_dim\", int, min_val=1)\n+ check_scalar(self.dim_latent_param_mat, \"dim_latent_param_mat\", int, min_val=1)\n+ check_scalar(self.n_cat_dim, \"n_cat_dim\", int, min_val=1)\n+ check_scalar(\n+ self.n_unobserved_cat_dim,\n+ \"n_unobserved_cat_dim\",\n+ int,\n+ min_val=0,\n+ max_val=self.n_cat_dim - 1,\n+ )\n+ check_scalar(\n+ self.n_irrelevant_cat_dim,\n+ \"n_irrelevant_cat_dim\",\n+ int,\n+ min_val=0,\n+ max_val=self.n_cat_dim - 1,\n+ )\n+ self.n_cat_dim += 1\n+ self.n_unobserved_cat_dim += 1\n+ self.n_irrelevant_cat_dim += 1\n+ self.define_action_embed()\n+\n+ def define_action_embed(self) -> None:\n+ \"\"\"Define action embeddings and latent category parameter matrices.\"\"\"\n+ self.latent_cat_param = self.random_.normal(\n+ size=(self.n_cat_dim, self.n_cat_per_dim, self.dim_latent_param_mat)\n+ )\n+ self.p_e_a = softmax(\n+ self.random_.normal(\n+ size=(self.n_actions, self.n_cat_per_dim, self.n_cat_dim)\n+ ),\n+ )\n+ self.action_context_reg = np.zeros((self.n_actions, self.n_cat_dim), dtype=int)\n+ for d in np.arange(self.n_cat_dim):\n+ self.action_context_reg[:, d] = sample_action_fast(\n+ self.p_e_a[np.arange(self.n_actions, dtype=int), :, d],\n+ random_state=self.random_state + d,\n+ )\n+\n+ def obtain_batch_bandit_feedback(self, n_rounds: int) -> BanditFeedback:\n+ \"\"\"Obtain batch logged bandit data.\n+\n+ Parameters\n+ ----------\n+ n_rounds: int\n+ Data size of the synthetic logged bandit data.\n+\n+ Returns\n+ ---------\n+ bandit_feedback: BanditFeedback\n+ Synthesized logged bandit data with action category information.\n+\n+ \"\"\"\n+ check_scalar(n_rounds, \"n_rounds\", int, min_val=1)\n+ contexts = self.random_.normal(size=(n_rounds, self.dim_context))\n+ cat_dim_importance = np.zeros(self.n_cat_dim)\n+ cat_dim_importance[self.n_irrelevant_cat_dim :] = self.random_.dirichlet(\n+ alpha=self.random_.uniform(size=self.n_cat_dim - self.n_irrelevant_cat_dim),\n+ size=1,\n+ )\n+ cat_dim_importance = cat_dim_importance.reshape((1, 1, self.n_cat_dim))\n+\n+ # calc expected rewards given context and action (n_data, n_actions)\n+ q_x_e = np.zeros((n_rounds, self.n_cat_per_dim, self.n_cat_dim))\n+ q_x_a = np.zeros((n_rounds, self.n_actions, self.n_cat_dim))\n+ for d in np.arange(self.n_cat_dim):\n+ q_x_e[:, :, d] = self.reward_function(\n+ context=contexts,\n+ action_context=self.latent_cat_param[d],\n+ random_state=self.random_state + d,\n+ )\n+ q_x_a[:, :, d] = q_x_e[:, :, d] @ self.p_e_a[:, :, d].T\n+ q_x_a = (q_x_a * cat_dim_importance).sum(2)\n+\n+ # sample actions for each round based on the behavior policy\n+ if self.behavior_policy_function is None:\n+ pi_b_logits = q_x_a\n+ else:\n+ pi_b_logits = self.behavior_policy_function(\n+ context=contexts,\n+ action_context=self.action_context,\n+ random_state=self.random_state,\n+ )\n+ if self.n_deficient_actions > 0:\n+ pi_b = np.zeros_like(q_x_a)\n+ n_supported_actions = self.n_actions - self.n_deficient_actions\n+ supported_actions = np.argsort(\n+ self.random_.gumbel(size=(n_rounds, self.n_actions)), axis=1\n+ )[:, ::-1][:, :n_supported_actions]\n+ 
supported_actions_idx = (\n+ np.tile(np.arange(n_rounds), (n_supported_actions, 1)).T,\n+ supported_actions,\n+ )\n+ pi_b[supported_actions_idx] = softmax(\n+ self.beta * pi_b_logits[supported_actions_idx]\n+ )\n+ else:\n+ pi_b = softmax(self.beta * pi_b_logits)\n+ actions = sample_action_fast(pi_b, random_state=self.random_state)\n+\n+ # sample action embeddings based on sampled actions\n+ action_embed = np.zeros((n_rounds, self.n_cat_dim), dtype=int)\n+ for d in np.arange(self.n_cat_dim):\n+ action_embed[:, d] = sample_action_fast(\n+ self.p_e_a[actions, :, d],\n+ random_state=d,\n+ )\n+\n+ # sample rewards given the context and action embeddings\n+ expected_rewards_factual = np.zeros(n_rounds)\n+ for d in np.arange(self.n_cat_dim):\n+ expected_rewards_factual += (\n+ cat_dim_importance[0, 0, d]\n+ * q_x_e[np.arange(n_rounds), action_embed[:, d], d]\n+ )\n+ if RewardType(self.reward_type) == RewardType.BINARY:\n+ rewards = self.random_.binomial(n=1, p=expected_rewards_factual)\n+ elif RewardType(self.reward_type) == RewardType.CONTINUOUS:\n+ rewards = self.random_.normal(\n+ loc=expected_rewards_factual, scale=self.reward_std, size=n_rounds\n+ )\n+\n+ return dict(\n+ n_rounds=n_rounds,\n+ n_actions=self.n_actions,\n+ action_context=self.action_context_reg[\n+ :, 1:\n+ ], # action context used for training a reg model\n+ action_embed=action_embed[\n+ :, self.n_unobserved_cat_dim :\n+ ], # action embeddings used for OPE with MIPW\n+ context=contexts,\n+ action=actions,\n+ position=None, # position effect is not considered in synthetic data\n+ reward=rewards,\n+ expected_reward=q_x_a,\n+ q_x_e=q_x_e,\n+ p_e_a=self.p_e_a[:, :, self.n_unobserved_cat_dim :],\n+ pi_b=pi_b[:, :, np.newaxis],\n+ pscore=pi_b[np.arange(n_rounds), actions],\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | implement SyntheticBanditDatasetWithActionEmbeds |
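The class added above builds, for every category dimension, a conditional embedding distribution p(e_d|a) as a softmax over Gaussian logits and then samples one category per dimension for each logged action. The following is a minimal NumPy sketch of that sampling step, not the library's own code: the nested loop stands in for obp's `sample_action_fast` and the sizes mirror the class defaults.

    import numpy as np

    rng = np.random.default_rng(12345)
    n_actions, n_cat_per_dim, n_cat_dim = 10, 10, 3

    # p(e_d | a): one categorical distribution per (action, category dimension)
    logits = rng.normal(size=(n_actions, n_cat_per_dim, n_cat_dim))
    exp_logits = np.exp(logits)
    p_e_a = exp_logits / exp_logits.sum(axis=1, keepdims=True)

    # sample one category per dimension for a few logged actions
    actions = rng.integers(n_actions, size=5)
    action_embed = np.zeros((actions.shape[0], n_cat_dim), dtype=int)
    for d in range(n_cat_dim):
        for i, a in enumerate(actions):
            action_embed[i, d] = rng.choice(n_cat_per_dim, p=p_e_a[a, :, d])

    print(action_embed)  # shape (5, 3): one sampled category per dimension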
641,014 | 13.02.2022 15:00:49 | 18,000 | d8c3d3846de8c762f2480e55a899c128ba4c6a43 | fix gradients | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -614,7 +614,7 @@ class QLearner(BaseOfflinePolicyLearner):\n@dataclass\nclass NNPolicyLearner(BaseOfflinePolicyLearner):\n- \"\"\"Off-policy learner parameterized by on a neural network.\n+ \"\"\"Off-policy learner parameterized by a neural network.\nParameters\n-----------\n@@ -1175,43 +1175,36 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nReturns\n----------\n- estimated_policy_value_arr: array-like, shape (batch_size,)\n+ estimated_policy_grad: array-like, shape (batch_size,)\nRewards of each data estimated by an OPE estimator.\n\"\"\"\n+ current_pi = action_dist[:, :, 0].detach()\n+ log_prob = torch.log(action_dist[:, :, 0])\n+ idx_tensor = torch.arange(action.shape[0], dtype=torch.long)\n+\nif self.off_policy_objective == \"dm\":\n- estimated_policy_value_arr = self.q_func_estimator.predict(\n+ q_hat = self.q_func_estimator.predict(\ncontext=context,\n- action_dist=action_dist,\n)\n+ estimated_policy_grad = torch.sum(q_hat * current_pi * log_prob, dim=1)\nelif self.off_policy_objective == \"ipw\":\n- n = action.shape[0]\n- idx_tensor = torch.arange(n, dtype=torch.long)\n- with torch.no_grad():\n- current_pi = action_dist[idx_tensor, action, 0]\n- iw = current_pi / pscore\n- log_prob = torch.log(action_dist[idx_tensor, action, 0])\n- estimated_policy_value_arr = iw * reward * log_prob\n+ iw = current_pi[idx_tensor, action] / pscore\n+ estimated_policy_grad = iw * reward\n+ estimated_policy_grad *= log_prob[idx_tensor, action]\nelif self.off_policy_objective == \"dr\":\n- n_rounds = action.shape[0]\n- idx_tensor = torch.arange(n_rounds, dtype=torch.long)\n- iw = action_dist[idx_tensor, action, 0] / pscore\n- q_hat_baseline = self.q_func_estimator.predict(\n- context=context,\n- action_dist=action_dist,\n- )\n- action_dist_ = torch.zeros((n_rounds, self.n_actions, self.len_list))\n- action_dist_[idx_tensor, action, 0] = 1\n- q_hat_actions = self.q_func_estimator.predict(\n+ q_hat = self.q_func_estimator.predict(\ncontext=context,\n- action_dist=action_dist_,\n)\n- estimated_policy_value_arr = iw * (reward - q_hat_actions)\n- estimated_policy_value_arr += q_hat_baseline\n+ q_hat_factual = q_hat[idx_tensor, action]\n+ iw = current_pi[idx_tensor, action] / pscore\n+ estimated_policy_grad = iw * (reward - q_hat_factual)\n+ estimated_policy_grad *= log_prob[idx_tensor, action]\n+ estimated_policy_grad += torch.sum(q_hat * current_pi * log_prob, dim=1)\n- return estimated_policy_value_arr\n+ return estimated_policy_grad\ndef _estimate_policy_constraint(\nself,\n@@ -1775,7 +1768,6 @@ class QFuncEstimator:\ndef predict(\nself,\ncontext: torch.Tensor,\n- action_dist: torch.Tensor,\n) -> torch.Tensor:\n\"\"\"Predict best continuous actions for new data.\n@@ -1784,9 +1776,6 @@ class QFuncEstimator:\ncontext: Tensor, shape (n_rounds_of_new_data, dim_context)\nContext vectors for new data.\n- action_dist: array-like, shape (n_rounds, n_actions, len_list)\n- Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_i|x_i)`.\n-\nReturns\n-----------\nestimated_expected_rewards: Tensor, shape (n_rounds_of_new_data,)\n@@ -1794,17 +1783,13 @@ class QFuncEstimator:\n\"\"\"\ncheck_tensor(tensor=context, name=\"context\", expected_dim=2)\n- check_tensor(tensor=action_dist, name=\"action_dist\", expected_dim=3)\nif context.shape[1] != self.dim_context:\nraise ValueError(\n\"Expected `context.shape[1] == self.dim_context`, but found it False\"\n)\nself.nn_model.eval()\n- q_hat = self.nn_model(context)\n- estimated_expected_rewards = (q_hat * 
action_dist[:, :, 0]).mean(1)\n-\n- return estimated_expected_rewards\n+ return self.nn_model(context)\n@dataclass\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix gradients |
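The rewritten `_estimate_policy_gradient` above uses the score-function form of each objective; for IPW the per-sample signal is w(x,a) * r * log pi(a|x) with the importance weight detached, and DR adds a direct-method baseline over the Q-function estimate. A small PyTorch sketch of the IPW and DR variants on toy data follows; `q_hat` is random here and only stands in for the frozen reward-model predictions.

    import torch

    n, n_actions = 4, 3
    pi = torch.softmax(torch.randn(n, n_actions, requires_grad=True), dim=1)
    action = torch.tensor([0, 2, 1, 0])
    reward = torch.rand(n)
    pscore = torch.full((n,), 1.0 / n_actions)   # logging propensities
    q_hat = torch.rand(n, n_actions)             # stand-in for the reward model

    idx = torch.arange(n)
    log_prob = torch.log(pi)
    iw = pi[idx, action].detach() / pscore       # importance weights, no gradient

    # IPW-style gradient signal
    grad_ipw = iw * reward * log_prob[idx, action]

    # DR-style: weighted residual plus the direct-method baseline term
    grad_dr = iw * (reward - q_hat[idx, action]) * log_prob[idx, action]
    grad_dr = grad_dr + (q_hat * pi.detach() * log_prob).sum(dim=1)

    print(grad_ipw.mean().item(), grad_dr.mean().item())
    (-grad_dr.mean()).backward()                 # descend on the negative value estimate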
641,014 | 15.02.2022 07:53:40 | 18,000 | 5dd44315f043ece4878425ff353b7ed9e425524f | fix input checks | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_embed.py",
"new_path": "obp/dataset/synthetic_embed.py",
"diff": "@@ -72,7 +72,7 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\nn_cat_per_dim: int, default=10\nNumber of categories (cardinality) per category dimension.\n- dim_latent_param_mat: int, default=5\n+ latent_param_mat_dim: int, default=5\nNumber of dimensions of the latent parameter matrix to define the expected rewards.\nWe assume that each category has a corresponding latent parameter representation, which\naffects the expected reward of the category. This parameter matrix is unobserved to the estimators.\n@@ -180,7 +180,7 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\n] = None\nbeta: float = 0.0\nn_cat_per_dim: int = 10\n- dim_latent_param_mat: int = 5\n+ latent_param_mat_dim: int = 5\nn_cat_dim: int = 3\nn_unobserved_cat_dim: int = 0\nn_irrelevant_cat_dim: int = 0\n@@ -192,31 +192,31 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\n\"\"\"Initialize Class.\"\"\"\nsuper().__post_init__()\ncheck_scalar(self.n_cat_per_dim, \"n_cat_per_dim\", int, min_val=1)\n- check_scalar(self.dim_latent_param_mat, \"dim_latent_param_mat\", int, min_val=1)\n+ check_scalar(self.latent_param_mat_dim, \"latent_param_mat_dim\", int, min_val=1)\ncheck_scalar(self.n_cat_dim, \"n_cat_dim\", int, min_val=1)\ncheck_scalar(\nself.n_unobserved_cat_dim,\n\"n_unobserved_cat_dim\",\nint,\nmin_val=0,\n- max_val=self.n_cat_dim - 1,\n+ max_val=self.n_cat_dim,\n)\ncheck_scalar(\nself.n_irrelevant_cat_dim,\n\"n_irrelevant_cat_dim\",\nint,\nmin_val=0,\n- max_val=self.n_cat_dim - 1,\n+ max_val=self.n_cat_dim,\n)\nself.n_cat_dim += 1\nself.n_unobserved_cat_dim += 1\nself.n_irrelevant_cat_dim += 1\n- self.define_action_embed()\n+ self._define_action_embed()\n- def define_action_embed(self) -> None:\n+ def _define_action_embed(self) -> None:\n\"\"\"Define action embeddings and latent category parameter matrices.\"\"\"\nself.latent_cat_param = self.random_.normal(\n- size=(self.n_cat_dim, self.n_cat_per_dim, self.dim_latent_param_mat)\n+ size=(self.n_cat_dim, self.n_cat_per_dim, self.latent_param_mat_dim)\n)\nself.p_e_a = softmax(\nself.random_.normal(\n@@ -327,7 +327,7 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\nposition=None, # position effect is not considered in synthetic data\nreward=rewards,\nexpected_reward=q_x_a,\n- q_x_e=q_x_e,\n+ q_x_e=q_x_e[:, :, self.n_unobserved_cat_dim :],\np_e_a=self.p_e_a[:, :, self.n_unobserved_cat_dim :],\npi_b=pi_b[:, :, np.newaxis],\npscore=pi_b[np.arange(n_rounds), actions],\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_embed.py",
"new_path": "obp/ope/estimators_embed.py",
"diff": "@@ -42,9 +42,6 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nn_actions: int\nNumber of actions in the logged data.\n- delta: float, default=0.05\n- Confidence level used to estimate the deviation bound in data-driven action embedding selection.\n-\nembedding_selection_method, default=None\nMethod to conduct data-driven action embedding selection. Must be one of None, 'exact', or 'greedy'.\nIf None, the given action embedding (action context) will be used to estimate the marginal importance weights.\n@@ -55,6 +52,9 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nmin_emb_dim: int, default=1\nMinimum number of action embedding dimensions to be used in estimating the marginal importance weights.\n+ delta: float, default=0.05\n+ Confidence level used to estimate the deviation bound in data-driven action embedding selection.\n+\nestimator_name: str, default='mipw'.\nName of the estimator.\n@@ -66,9 +66,9 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\n\"\"\"\nn_actions: int\n- delta: float = 0.05\nembedding_selection_method: Optional[str] = None\nmin_emb_dim: int = 1\n+ delta: float = 0.05\nestimator_name: str = \"mipw\"\ndef __post_init__(self) -> None:\n@@ -207,12 +207,12 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\ndef estimate_policy_value(\nself,\n- context: np.ndarray,\nreward: np.ndarray,\naction: np.ndarray,\naction_embed: np.ndarray,\npi_b: np.ndarray,\naction_dist: np.ndarray,\n+ context: Optional[np.ndarray] = None,\np_e_a: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\n**kwargs,\n@@ -242,6 +242,9 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nIf None, the effect of position on the reward will be ignored.\n(If only a single action is chosen for each data, you can just ignore this argument.)\n+ context: array-like, shape (n_rounds, dim_context), default=None\n+ Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.\n+\np_e_a: array-like, shape (n_actions, n_cat_per_dim, n_cat_dim), default=None\nConditional distribution of action embeddings given each action.\nThis distribution is available only when we use synthetic bandit data, i.e.,\n@@ -276,6 +279,8 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nposition = np.zeros(action_dist.shape[0], dtype=int)\nif p_e_a is not None:\ncheck_array(array=p_e_a, name=\"p_e_a\", expected_dim=3)\n+ else:\n+ check_array(array=context, name=\"context\", expected_dim=2)\nif self.embedding_selection_method == \"exact\":\nreturn self._estimate_with_exact_pruning(\n@@ -420,12 +425,12 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\ndef estimate_interval(\nself,\n- context: np.ndarray,\nreward: np.ndarray,\naction: np.ndarray,\naction_embed: np.ndarray,\npi_b: np.ndarray,\naction_dist: np.ndarray,\n+ context: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\np_e_a: Optional[np.ndarray] = None,\nalpha: float = 0.05,\n@@ -453,6 +458,9 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\naction_dist: array-like, shape (n_rounds, n_actions, len_list)\nAction choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\\\pi_e(a_i|x_i)`.\n+ context: array-like, shape (n_rounds, dim_context), default=None\n+ Context vectors observed for each data in logged bandit data, i.e., :math:`x_i`.\n+\nposition: array-like, shape (n_rounds,), 
default=None\nIndices to differentiate positions in a recommendation interface where the actions are presented.\nIf None, the effect of position on the reward will be ignored.\n@@ -501,6 +509,8 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nposition = np.zeros(action_dist.shape[0], dtype=int)\nif p_e_a is not None:\ncheck_array(array=p_e_a, name=\"p_e_a\", expected_dim=3)\n+ else:\n+ check_array(array=context, name=\"context\", expected_dim=2)\nestimated_round_rewards = self._estimate_round_rewards(\ncontext=context,\n@@ -544,12 +554,6 @@ class SelfNormalizedMarginalizedInverseProbabilityWeighting(\nn_actions: int\nNumber of actions in the logged data.\n- min_emb_dim: int, default=1\n- Minimum number of action embedding dimensions to be used in estimating the marginal importance weights.\n-\n- delta: float, default=0.05\n- Confidence level used to estimate the deviation bound in data-driven action embedding selection.\n-\nembedding_selection_method, default=None\nMethod to conduct data-driven action embedding selection. Must be one of None, 'exact', or 'greedy'.\nIf None, the given action embedding (action context) will be used to estimate the marginal importance weights.\n@@ -557,6 +561,12 @@ class SelfNormalizedMarginalizedInverseProbabilityWeighting(\nbut might be worse in terms of OPE performance.\nIf the number of action embedding dimensions is larger than 20, 'greedy' is a recommended choice.\n+ min_emb_dim: int, default=1\n+ Minimum number of action embedding dimensions to be used in estimating the marginal importance weights.\n+\n+ delta: float, default=0.05\n+ Confidence level used to estimate the deviation bound in data-driven action embedding selection.\n+\nestimator_name: str, default='snmipw'.\nName of the estimator.\n@@ -567,9 +577,6 @@ class SelfNormalizedMarginalizedInverseProbabilityWeighting(\n\"\"\"\n- min_emb_dim: int = 1\n- delta: float = 0.05\n- embedding_selection_method: Optional[str] = None\nestimator_name: str = \"snmipw\"\ndef _estimate_round_rewards(\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix checkings |
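The `__post_init__` changes above keep relying on scikit-learn's `check_scalar` for hyperparameter validation, now with `max_val=n_cat_dim` instead of `n_cat_dim - 1`. A tiny sketch of that validation pattern with made-up values:

    from sklearn.utils import check_scalar

    n_cat_dim = 3
    check_scalar(n_cat_dim, "n_cat_dim", int, min_val=1)
    check_scalar(2, "n_unobserved_cat_dim", int, min_val=0, max_val=n_cat_dim)

    try:
        check_scalar("10", "n_cat_per_dim", int, min_val=1)
    except TypeError as e:
        print(e)  # n_cat_per_dim must be an instance of <class 'int'>, not <class 'str'>.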
641,014 | 17.02.2022 17:23:23 | 18,000 | 4932a4980b407e1378f0f9b20b495a422a214469 | add a parameter to control the embedding distribution for SyntheticBanditDatasetWithActionEmbeds | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_embed.py",
"new_path": "obp/dataset/synthetic_embed.py",
"diff": "from dataclasses import dataclass\nfrom typing import Callable\nfrom typing import Optional\n+from typing import Union\nimport numpy as np\nfrom sklearn.utils import check_scalar\n@@ -80,6 +81,10 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\nn_cat_dim: int, default=3\nNumber of action/item category dimensions.\n+ p_e_a_param_std: int or float, default=1.0\n+ Standard deviation of the normal distribution to sample the parameters of the action embedding distribution.\n+ A large value generates a near-deterministic embedding distribution, while a small value generates a near-uniform embedding distribution.\n+\nn_unobserved_cat_dim: int, default=0\nNumber of unobserved category dimensions.\nWhen there are some unobserved dimensions, the marginalized IPW estimator should have a larger bias.\n@@ -174,14 +179,15 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\ndim_context: int = 1\nreward_type: str = RewardType.BINARY.value\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\n- reward_std: float = 1.0\n+ reward_std: Union[int, float] = 1.0\nbehavior_policy_function: Optional[\nCallable[[np.ndarray, np.ndarray], np.ndarray]\n] = None\n- beta: float = 0.0\n+ beta: Union[int, float] = 0.0\nn_cat_per_dim: int = 10\nlatent_param_mat_dim: int = 5\nn_cat_dim: int = 3\n+ p_e_a_param_std: Union[int, float] = 1.0\nn_unobserved_cat_dim: int = 0\nn_irrelevant_cat_dim: int = 0\nn_deficient_actions: int = 0\n@@ -194,6 +200,7 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\ncheck_scalar(self.n_cat_per_dim, \"n_cat_per_dim\", int, min_val=1)\ncheck_scalar(self.latent_param_mat_dim, \"latent_param_mat_dim\", int, min_val=1)\ncheck_scalar(self.n_cat_dim, \"n_cat_dim\", int, min_val=1)\n+ check_scalar(self.p_e_a_param_std, \"p_e_a_param_std\", (int, float), min_val=0.0)\ncheck_scalar(\nself.n_unobserved_cat_dim,\n\"n_unobserved_cat_dim\",\n@@ -220,7 +227,8 @@ class SyntheticBanditDatasetWithActionEmbeds(SyntheticBanditDataset):\n)\nself.p_e_a = softmax(\nself.random_.normal(\n- size=(self.n_actions, self.n_cat_per_dim, self.n_cat_dim)\n+ scale=self.p_e_a_param_std,\n+ size=(self.n_actions, self.n_cat_per_dim, self.n_cat_dim),\n),\n)\nself.action_context_reg = np.zeros((self.n_actions, self.n_cat_dim), dtype=int)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic_embed.py",
"new_path": "tests/dataset/test_synthetic_embed.py",
"diff": "@@ -5,7 +5,7 @@ from obp.dataset import logistic_reward_function\nfrom obp.dataset import SyntheticBanditDatasetWithActionEmbeds\n-# n_actions, dim_context, reward_type, reward_std, beta, n_cat_per_dim, latent_param_mat_dim, n_cat_dim, n_unobserved_cat_dim, n_irrelevant_cat_dim, n_deficient_actions, action_context, random_state, err, description\n+# n_actions, dim_context, reward_type, reward_std, beta, n_cat_per_dim, latent_param_mat_dim, n_cat_dim, p_e_a_param_std, n_unobserved_cat_dim, n_irrelevant_cat_dim, n_deficient_actions, action_context, random_state, err, description\ninvalid_input_of_init = [\n(\n\"3\", #\n@@ -16,6 +16,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -33,6 +34,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -50,6 +52,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -67,6 +70,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -84,6 +88,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -101,6 +106,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -118,6 +124,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -135,6 +142,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -152,6 +160,7 @@ invalid_input_of_init = [\n1.0, #\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -169,6 +178,7 @@ invalid_input_of_init = [\n0, #\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -186,6 +196,7 @@ invalid_input_of_init = [\n1,\n1.0, #\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -203,6 +214,7 @@ invalid_input_of_init = [\n1,\n0, #\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -220,6 +232,7 @@ invalid_input_of_init = [\n1,\n1,\n1.0, #\n+ 1.0,\n0,\n0,\n0,\n@@ -237,6 +250,7 @@ invalid_input_of_init = [\n1,\n1,\n0, #\n+ 1.0,\n0,\n0,\n0,\n@@ -254,6 +268,43 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ \"1.0\", #\n+ 0,\n+ 0,\n+ 0,\n+ None,\n+ 12345,\n+ TypeError,\n+ r\"p_e_a_param_std must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n+ ),\n+ (\n+ 3,\n+ 5,\n+ \"binary\",\n+ 1.0,\n+ 0.0,\n+ 1,\n+ 1,\n+ 1,\n+ -1.0, #\n+ 0,\n+ 0,\n+ 0,\n+ None,\n+ 12345,\n+ ValueError,\n+ r\"p_e_a_param_std == -1.0, must be >= 0.\",\n+ ),\n+ (\n+ 3,\n+ 5,\n+ \"binary\",\n+ 1.0,\n+ 0.0,\n+ 1,\n+ 1,\n+ 1,\n+ 1.0,\n0.0, #\n0,\n0,\n@@ -271,6 +322,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n-1, #\n0,\n0,\n@@ -288,6 +340,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n2, #\n0,\n0,\n@@ -305,6 +358,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0.0, #\n0,\n@@ -322,7 +376,8 @@ invalid_input_of_init = [\n1,\n1,\n1,\n- 1,\n+ 1.0,\n+ 0,\n-1, #\n0,\nNone,\n@@ -339,6 +394,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n2, #\n0,\n@@ -356,6 +412,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n\"0\", #\n@@ -373,6 +430,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n1.0, #\n@@ -390,6 +448,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n10, #\n@@ -407,6 +466,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -424,6 +484,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -441,6 +502,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -458,6 +520,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -475,6 +538,7 @@ invalid_input_of_init = [\n1,\n1,\n1,\n+ 1.0,\n0,\n0,\n0,\n@@ -487,7 +551,7 @@ invalid_input_of_init = [\[email protected](\n- \"n_actions, dim_context, reward_type, reward_std, beta, n_cat_per_dim, latent_param_mat_dim, n_cat_dim, n_unobserved_cat_dim, n_irrelevant_cat_dim, 
n_deficient_actions, action_context, random_state, err, description\",\n+ \"n_actions, dim_context, reward_type, reward_std, beta, n_cat_per_dim, latent_param_mat_dim, n_cat_dim, p_e_a_param_std, n_unobserved_cat_dim, n_irrelevant_cat_dim, n_deficient_actions, action_context, random_state, err, description\",\ninvalid_input_of_init,\n)\ndef test_synthetic_init_using_invalid_inputs(\n@@ -499,6 +563,7 @@ def test_synthetic_init_using_invalid_inputs(\nn_cat_per_dim,\nlatent_param_mat_dim,\nn_cat_dim,\n+ p_e_a_param_std,\nn_unobserved_cat_dim,\nn_irrelevant_cat_dim,\nn_deficient_actions,\n@@ -518,6 +583,7 @@ def test_synthetic_init_using_invalid_inputs(\nn_cat_per_dim=n_cat_per_dim,\nlatent_param_mat_dim=latent_param_mat_dim,\nn_cat_dim=n_cat_dim,\n+ p_e_a_param_std=p_e_a_param_std,\nn_unobserved_cat_dim=n_unobserved_cat_dim,\nn_irrelevant_cat_dim=n_irrelevant_cat_dim,\naction_context=action_context,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add a parameter to control the embedding distribution for SyntheticBanditDatasetWithActionEmbeds |
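`p_e_a_param_std` introduced above scales the Gaussian logits before the softmax, so larger values concentrate p(e|a) on a few categories while smaller values flatten it toward uniform. A quick NumPy illustration of that effect via the entropy of a single softmax row (a sketch, not library code):

    import numpy as np

    rng = np.random.default_rng(0)

    def softmax(x):
        z = np.exp(x - x.max())
        return z / z.sum()

    for std in (0.1, 1.0, 5.0):
        p = softmax(rng.normal(scale=std, size=10))
        entropy = -(p * np.log(p)).sum()
        print(f"scale={std}: entropy={entropy:.2f}")  # entropy typically shrinks as the scale grows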
641,014 | 27.03.2022 07:02:46 | 14,400 | 66e052fb60359be849a1e07c8a83189bfb605fc7 | fix a bug of mipw with one-dimensional action embed | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_embed.py",
"new_path": "obp/ope/estimators_embed.py",
"diff": "@@ -293,6 +293,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nelse:\ncheck_array(array=context, name=\"context\", expected_dim=2)\n+ if action_embed.shape[1] > 1 and self.embedding_selection_method is not None:\nif self.embedding_selection_method == \"exact\":\nreturn self._estimate_with_exact_pruning(\ncontext=context,\n@@ -303,7 +304,6 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b=pi_b,\naction_dist=action_dist,\n)\n-\nelif self.embedding_selection_method == \"greedy\":\nreturn self._estimate_with_greedy_pruning(\ncontext=context,\n@@ -314,7 +314,6 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b=pi_b,\naction_dist=action_dist,\n)\n-\nelse:\nreturn self._estimate_round_rewards(\ncontext=context,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix a bug of mipw with one-dimensional action embed |
641,014 | 27.03.2022 12:53:15 | 14,400 | a8a32782453db89ed9ebd96f85c4ce151dba04b7 | modify some variable names in MIPS | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_embed.py",
"new_path": "obp/ope/estimators_embed.py",
"diff": "@@ -44,7 +44,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nn_actions: int\nNumber of actions in the logged data.\n- p_a_e_estimator: ClassifierMixin, default=`sklearn.linear_model.LogisticRegression(max_iter=1000, random_state=12345)`\n+ pi_a_x_e_estimator: ClassifierMixin, default=`sklearn.linear_model.LogisticRegression(max_iter=1000, random_state=12345)`\nA sklearn classifier to estimate :math:`\\\\pi(a|x,e)`.\nIt is then used to estimate the marginal importance weight as\n:math:`\\\\hat{w}(x,e) = \\\\mathbb{E}_{\\\\hat{\\\\pi}(a|x,e)}[w(x,a)]`.\n@@ -73,7 +73,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\n\"\"\"\nn_actions: int\n- p_a_e_estimator: ClassifierMixin = LogisticRegression(\n+ pi_a_x_e_estimator: ClassifierMixin = LogisticRegression(\nmax_iter=1000, random_state=12345\n)\nembedding_selection_method: Optional[str] = None\n@@ -103,8 +103,8 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\n\"If given, `embedding_selection_method` must be either 'exact' or 'greedy', but\"\nf\"{self.embedding_selection_method} is given.\"\n)\n- if not is_classifier(self.p_a_e_estimator):\n- raise ValueError(\"`p_a_e_estimator` must be a classifier.\")\n+ if not is_classifier(self.pi_a_x_e_estimator):\n+ raise ValueError(\"`pi_a_x_e_estimator` must be a classifier.\")\ndef _estimate_round_rewards(\nself,\n@@ -209,10 +209,10 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\ndrop=\"first\",\n).fit_transform(action_embed)\nx_e = np.c_[context, c]\n- p_a_e = np.zeros((n, self.n_actions))\n- self.p_a_e_estimator.fit(x_e, action)\n- p_a_e[:, np.unique(action)] = self.p_a_e_estimator.predict_proba(x_e)\n- w_x_e = (w_x_a * p_a_e).sum(1)\n+ pi_a_x_e = np.zeros((n, self.n_actions))\n+ self.pi_a_x_e_estimator.fit(x_e, action)\n+ pi_a_x_e[:, np.unique(action)] = self.pi_a_x_e_estimator.predict_proba(x_e)\n+ w_x_e = (w_x_a * pi_a_x_e).sum(1)\nreturn w_x_e\n@@ -403,8 +403,8 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nwhile current_feat.shape[0] > self.min_emb_dim:\ntheta_list_, cnf_list_, d_list_ = [], [], []\nfor d in current_feat:\n- exclude_d_idx = np.where(current_feat != d, True, False)\n- candidate_feat = current_feat[exclude_d_idx]\n+ idx_without_d = np.where(current_feat != d, True, False)\n+ candidate_feat = current_feat[idx_without_d]\ntheta, cnf = self._estimate_round_rewards(\ncontext=context,\nreward=reward,\n@@ -420,15 +420,15 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nidx_list = np.argsort(cnf_list_)[::-1]\nfor idx in idx_list:\n- exclude_d = d_list_[idx]\n+ excluded_dim = d_list_[idx]\ntheta_i, cnf_i = theta_list_[idx], cnf_list_[idx]\ntheta_j, cnf_j = np.array(theta_list), np.array(cnf_list)\nif (np.abs(theta_j - theta_i) <= cnf_i + C * cnf_j).all():\ntheta_list.append(theta_i), cnf_list.append(cnf_i)\nelse:\nreturn theta_j[-1]\n- exclude_d_idx = np.where(current_feat != exclude_d, True, False)\n- current_feat = current_feat[exclude_d_idx]\n+ idx_without_d = np.where(current_feat != excluded_dim, True, False)\n+ current_feat = current_feat[idx_without_d]\nreturn theta_j[-1]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators_embed.py",
"new_path": "tests/ope/test_ipw_estimators_embed.py",
"diff": "@@ -11,7 +11,7 @@ from obp.ope import SelfNormalizedMarginalizedInverseProbabilityWeighting as SNM\nfrom obp.types import BanditFeedback\n-# n_actions, delta, p_a_e_estimator, embedding_selection_method, min_emb_dim, err, description\n+# n_actions, delta, pi_a_x_e_estimator, embedding_selection_method, min_emb_dim, err, description\ninvalid_input_of_ipw_init = [\n(\n2.0, #\n@@ -65,7 +65,7 @@ invalid_input_of_ipw_init = [\nNone,\n1,\nValueError,\n- r\"`p_a_e_estimator` must be a classifier.\",\n+ r\"`pi_a_x_e_estimator` must be a classifier.\",\n),\n(\n2,\n@@ -74,7 +74,7 @@ invalid_input_of_ipw_init = [\nNone,\n1,\nValueError,\n- r\"`p_a_e_estimator` must be a classifier.\",\n+ r\"`pi_a_x_e_estimator` must be a classifier.\",\n),\n(\n2,\n@@ -107,13 +107,13 @@ invalid_input_of_ipw_init = [\[email protected](\n- \"n_actions, delta, p_a_e_estimator, embedding_selection_method, min_emb_dim, err, description\",\n+ \"n_actions, delta, pi_a_x_e_estimator, embedding_selection_method, min_emb_dim, err, description\",\ninvalid_input_of_ipw_init,\n)\ndef test_mipw_init_using_invalid_inputs(\nn_actions,\ndelta,\n- p_a_e_estimator,\n+ pi_a_x_e_estimator,\nembedding_selection_method,\nmin_emb_dim,\nerr,\n@@ -123,7 +123,7 @@ def test_mipw_init_using_invalid_inputs(\n_ = MIPW(\nn_actions=n_actions,\ndelta=delta,\n- p_a_e_estimator=p_a_e_estimator,\n+ pi_a_x_e_estimator=pi_a_x_e_estimator,\nembedding_selection_method=embedding_selection_method,\nmin_emb_dim=min_emb_dim,\n)\n@@ -132,7 +132,7 @@ def test_mipw_init_using_invalid_inputs(\n_ = SNMIPW(\nn_actions=n_actions,\ndelta=delta,\n- p_a_e_estimator=p_a_e_estimator,\n+ pi_a_x_e_estimator=pi_a_x_e_estimator,\nembedding_selection_method=embedding_selection_method,\nmin_emb_dim=min_emb_dim,\n)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | modify some variable names in MIPS |
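The renamed `pi_a_x_e_estimator` is a classifier for pi(a|x,e); its predictions turn the vanilla weights w(x,a) = pi_e(a|x) / pi_b(a|x) into marginal weights w(x,e) = E_{pi(a|x,e)}[w(x,a)], exactly the `w_x_e = (w_x_a * pi_a_x_e).sum(1)` line in the diff. The sketch below reproduces that step on synthetic arrays; for brevity it feeds the raw embedding columns to the classifier instead of the one-hot encoding used in the library.

    import numpy as np
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(12345)
    n, n_actions = 500, 4

    context = rng.normal(size=(n, 5))
    action = rng.integers(n_actions, size=n)
    action_embed = rng.integers(3, size=(n, 2))            # observed embeddings
    pi_b = np.full((n, n_actions), 1.0 / n_actions)        # logging policy
    pi_e = rng.dirichlet(np.ones(n_actions), size=n)       # evaluation policy
    w_x_a = pi_e / pi_b                                    # vanilla importance weights

    x_e = np.c_[context, action_embed]
    clf = LogisticRegression(max_iter=1000, random_state=12345).fit(x_e, action)
    pi_a_x_e = np.zeros((n, n_actions))
    pi_a_x_e[:, np.unique(action)] = clf.predict_proba(x_e)
    w_x_e = (w_x_a * pi_a_x_e).sum(axis=1)                 # marginal importance weights

    print(w_x_e.mean())                                    # roughly 1 on this synthetic data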
641,014 | 02.04.2022 11:56:32 | 14,400 | 0dda2322adc8b38c75e02ead7e17a68521591a0f | modify init learning rate | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -743,7 +743,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.0001\n+ learning_rate_init: float = 0.001\nmax_iter: int = 200\nshuffle: bool = True\nrandom_state: Optional[int] = None\n@@ -1477,7 +1477,7 @@ class QFuncEstimator:\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.0001\n+ learning_rate_init: float = 0.001\nmax_iter: int = 200\nshuffle: bool = True\nrandom_state: Optional[int] = None\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | modify init learning rate |
641,014 | 02.04.2022 11:56:57 | 14,400 | 22fd3adb0e6d41795a2893ad509bafc42f7259f7 | fix tests on mipw | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_ipw_estimators_embed.py",
"new_path": "tests/ope/test_ipw_estimators_embed.py",
"diff": "@@ -314,9 +314,12 @@ def test_mipw_using_invalid_input_data(\n) -> None:\n# prepare ipw instances\nmipw = MIPW(n_actions=2)\n- snmipw = MIPW(n_actions=2)\n+ mipw_exact = MIPW(n_actions=2, embedding_selection_method=\"exact\")\n+ mipw_greedy = MIPW(n_actions=2, embedding_selection_method=\"greedy\")\n+ snmipw = SNMIPW(n_actions=2)\n+ for est in [mipw, mipw_exact, mipw_greedy, snmipw]:\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = mipw.estimate_policy_value(\n+ _ = est.estimate_policy_value(\naction_dist=action_dist,\ncontext=context,\naction=action,\n@@ -327,29 +330,7 @@ def test_mipw_using_invalid_input_data(\nposition=position,\n)\nwith pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = mipw.estimate_interval(\n- action_dist=action_dist,\n- context=context,\n- action=action,\n- reward=reward,\n- action_embed=action_embed,\n- pi_b=pi_b,\n- p_e_a=p_e_a,\n- position=position,\n- )\n- with pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = snmipw.estimate_policy_value(\n- action_dist=action_dist,\n- context=context,\n- action=action,\n- reward=reward,\n- action_embed=action_embed,\n- pi_b=pi_b,\n- p_e_a=p_e_a,\n- position=position,\n- )\n- with pytest.raises(ValueError, match=f\"{description}*\"):\n- _ = snmipw.estimate_interval(\n+ _ = est.estimate_interval(\naction_dist=action_dist,\ncontext=context,\naction=action,\n@@ -376,9 +357,11 @@ def test_ipw_using_random_evaluation_policy(\n}\ninput_dict[\"action_dist\"] = action_dist\nmipw = MIPW(n_actions=synthetic_bandit_feedback_with_embed[\"n_actions\"])\n- snmipw = MIPW(n_actions=synthetic_bandit_feedback_with_embed[\"n_actions\"])\n+ mipw_exact = MIPW(n_actions=synthetic_bandit_feedback_with_embed[\"n_actions\"], embedding_selection_method=\"exact\")\n+ mipw_greedy = MIPW(n_actions=synthetic_bandit_feedback_with_embed[\"n_actions\"], embedding_selection_method=\"greedy\")\n+ snmipw = SNMIPW(n_actions=synthetic_bandit_feedback_with_embed[\"n_actions\"])\n# ipw estimators can be used without estimated_rewards_by_reg_model\n- for estimator in [mipw, snmipw]:\n+ for estimator in [mipw, mipw_exact, mipw_greedy, snmipw]:\nestimated_policy_value = estimator.estimate_policy_value(**input_dict)\nassert isinstance(\nestimated_policy_value, float\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -319,6 +319,11 @@ def test_offline_estimation_performance(\nMarginalizedInverseProbabilityWeighting(\nn_actions=n_actions, estimator_name=\"mipw\"\n),\n+ MarginalizedInverseProbabilityWeighting(\n+ n_actions=n_actions,\n+ embedding_selection_method=\"greedy\",\n+ estimator_name=\"mipw (greedy selection)\"\n+ ),\nSelfNormalizedMarginalizedInverseProbabilityWeighting(\nn_actions=n_actions, estimator_name=\"snmipw\"\n),\n@@ -346,7 +351,7 @@ def test_offline_estimation_performance(\nverbose=0,\n)([delayed(process)(i) for i in np.arange(n_runs)])\nmetric_dict = {est.estimator_name: dict() for est in ope_estimators}\n- metric_dict.update({\"mipw\": dict(), \"snmipw\": dict()})\n+ metric_dict.update({\"mipw\": dict(), \"mipw (greedy selection)\": dict(), \"snmipw\": dict()})\nfor i, relative_ee_i in enumerate(processed):\nfor (\nestimator_name,\n@@ -378,6 +383,7 @@ def test_offline_estimation_performance(\n\"bipw (svc sample)\",\n\"bipw (random_forest sample)\",\n\"mipw\",\n+ \"mipw (greedy selection)\",\n\"snmipw\",\n]\nfor estimator_name in tested_estimators:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix tests on mipw |
641,014 | 03.04.2022 16:54:22 | 14,400 | 895e642bf1f18e37f9152b00f53e2a6ec7677dfb | refactor nnpolicylearner | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -1081,25 +1081,23 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nself.nn_model.train()\nfor x, a, r, p, pos in training_data_loader:\noptimizer.zero_grad()\n- action_dist_by_current_policy = self.nn_model(x).unsqueeze(-1)\n- policy_value_arr = self._estimate_policy_value(\n+ pi = self.nn_model(x).unsqueeze(-1)\n+ policy_grad_arr = -self._estimate_policy_gradient(\ncontext=x,\nreward=r,\naction=a,\npscore=p,\n- action_dist=action_dist_by_current_policy,\n+ action_dist=pi,\nposition=pos,\n)\npolicy_constraint = self._estimate_policy_constraint(\naction=a,\npscore=p,\n- action_dist=action_dist_by_current_policy,\n+ action_dist=pi,\n)\n- variance_constraint = torch.var(policy_value_arr)\n- negative_loss = policy_value_arr.mean()\n- negative_loss += self.policy_reg_param * policy_constraint\n- negative_loss -= self.var_reg_param * variance_constraint\n- loss = -negative_loss\n+ loss = -policy_grad_arr.mean()\n+ loss += self.policy_reg_param * policy_constraint\n+ loss += self.var_reg_param * torch.var(policy_grad_arr)\nloss.backward()\noptimizer.step()\n@@ -1116,25 +1114,23 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nif self.early_stopping:\nself.nn_model.eval()\nfor x, a, r, p, pos in validation_data_loader:\n- action_dist_by_current_policy = self.nn_model(x).unsqueeze(-1)\n- policy_value_arr = self._estimate_policy_value(\n+ pi = self.nn_model(x).unsqueeze(-1)\n+ policy_grad_arr = -self._estimate_policy_gradient(\ncontext=x,\nreward=r,\naction=a,\npscore=p,\n- action_dist=action_dist_by_current_policy,\n+ action_dist=pi,\nposition=pos,\n)\npolicy_constraint = self._estimate_policy_constraint(\naction=a,\npscore=p,\n- action_dist=action_dist_by_current_policy,\n+ action_dist=pi,\n)\n- variance_constraint = torch.var(policy_value_arr)\n- negative_loss = policy_value_arr.mean()\n- negative_loss += self.policy_reg_param * policy_constraint\n- negative_loss -= self.var_reg_param * variance_constraint\n- loss = -negative_loss\n+ loss = -policy_grad_arr.mean()\n+ loss += self.policy_reg_param * policy_constraint\n+ loss += self.var_reg_param * torch.var(policy_grad_arr)\nloss_value = loss.item()\nif previous_validation_loss is not None:\nif loss_value - previous_validation_loss < self.tol:\n@@ -1145,7 +1141,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nbreak\nprevious_validation_loss = loss_value\n- def _estimate_policy_value(\n+ def _estimate_policy_gradient(\nself,\ncontext: torch.Tensor,\naction: torch.Tensor,\n@@ -1154,7 +1150,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\naction_dist: torch.Tensor,\nposition: torch.Tensor,\n) -> torch.Tensor:\n- \"\"\"Calculate policy loss used in the policy gradient method.\n+ \"\"\"Estimate the policy gradient.\nParameters\n-----------\n@@ -1175,7 +1171,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nReturns\n----------\n- estimated_policy_grad: array-like, shape (batch_size,)\n+ estimated_policy_grad_arr: array-like, shape (batch_size,)\nRewards of each data estimated by an OPE estimator.\n\"\"\"\n@@ -1187,12 +1183,12 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nq_hat = self.q_func_estimator.predict(\ncontext=context,\n)\n- estimated_policy_grad = torch.sum(q_hat * current_pi * log_prob, dim=1)\n+ estimated_policy_grad_arr = torch.sum(q_hat * current_pi * log_prob, dim=1)\nelif self.off_policy_objective == \"ipw\":\niw = current_pi[idx_tensor, action] / pscore\n- estimated_policy_grad = iw * reward\n- estimated_policy_grad *= log_prob[idx_tensor, action]\n+ estimated_policy_grad_arr = iw * reward\n+ 
estimated_policy_grad_arr *= log_prob[idx_tensor, action]\nelif self.off_policy_objective == \"dr\":\nq_hat = self.q_func_estimator.predict(\n@@ -1200,11 +1196,11 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n)\nq_hat_factual = q_hat[idx_tensor, action]\niw = current_pi[idx_tensor, action] / pscore\n- estimated_policy_grad = iw * (reward - q_hat_factual)\n- estimated_policy_grad *= log_prob[idx_tensor, action]\n- estimated_policy_grad += torch.sum(q_hat * current_pi * log_prob, dim=1)\n+ estimated_policy_grad_arr = iw * (reward - q_hat_factual)\n+ estimated_policy_grad_arr *= log_prob[idx_tensor, action]\n+ estimated_policy_grad_arr += torch.sum(q_hat * current_pi * log_prob, dim=1)\n- return estimated_policy_grad\n+ return estimated_policy_grad_arr\ndef _estimate_policy_constraint(\nself,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline_continuous.py",
"new_path": "obp/policy/offline_continuous.py",
"diff": "@@ -157,7 +157,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.0001\n+ learning_rate_init: float = 0.001\nmax_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\n@@ -478,16 +478,13 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nfor x, a, r, p in training_data_loader:\noptimizer.zero_grad()\naction_by_current_policy = self.nn_model(x).flatten()\n- loss = (\n- -1.0\n- * self._estimate_policy_value(\n+ loss = -self._estimate_policy_gradient(\ncontext=x,\nreward=r,\naction=a,\npscore=p,\naction_by_current_policy=action_by_current_policy,\n).mean()\n- )\nloss.backward()\noptimizer.step()\n@@ -505,16 +502,13 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nself.nn_model.eval()\nfor x, a, r, p in validation_data_loader:\naction_by_current_policy = self.nn_model(x).flatten()\n- loss = (\n- -1.0\n- * self._estimate_policy_value(\n+ loss = -self._estimate_policy_gradient(\ncontext=x,\nreward=r,\naction=a,\npscore=p,\naction_by_current_policy=action_by_current_policy,\n).mean()\n- )\nloss_value = loss.item()\nself.val_loss_curve.append(-loss_value)\nif previous_validation_loss is not None:\n@@ -526,7 +520,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nbreak\nprevious_validation_loss = loss_value\n- def _estimate_policy_value(\n+ def _estimate_policy_gradient(\nself,\ncontext: torch.Tensor,\naction: torch.Tensor,\n@@ -534,7 +528,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\npscore: torch.Tensor,\naction_by_current_policy: torch.Tensor,\n) -> float:\n- \"\"\"Calculate policy loss used in the policy gradient method.\n+ \"\"\"Estimate the policy gradient.\nParameters\n-----------\n@@ -555,7 +549,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nReturns\n----------\n- estimated_policy_value_arr: array-like, shape (batch_size,)\n+ estimated_policy_grad_arr: array-like, shape (batch_size,)\nRewards of each data estimated by an OPE estimator.\n\"\"\"\n@@ -571,7 +565,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\n)\nif self.pg_method == \"dpg\":\n- estimated_policy_value_arr = self.q_func_estimator.predict(\n+ estimated_policy_grad_arr = self.q_func_estimator.predict(\ncontext=context,\naction=action_by_current_policy,\n)\n@@ -579,8 +573,8 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nelif self.pg_method == \"ipw\":\nu = action_by_current_policy - action\nu /= self.bandwidth\n- estimated_policy_value_arr = gaussian_kernel(u) * reward / pscore\n- estimated_policy_value_arr /= self.bandwidth\n+ estimated_policy_grad_arr = gaussian_kernel(u) * reward / pscore\n+ estimated_policy_grad_arr /= self.bandwidth\nelif self.pg_method == \"dr\":\nu = action_by_current_policy - action\n@@ -589,11 +583,11 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\ncontext=context,\naction=action_by_current_policy,\n)\n- estimated_policy_value_arr = gaussian_kernel(u) * (reward - q_hat) / pscore\n- estimated_policy_value_arr /= self.bandwidth\n- estimated_policy_value_arr += q_hat\n+ estimated_policy_grad_arr = gaussian_kernel(u) * (reward - q_hat) / pscore\n+ estimated_policy_grad_arr /= self.bandwidth\n+ estimated_policy_grad_arr += q_hat\n- return estimated_policy_value_arr\n+ return estimated_policy_grad_arr\ndef predict(self, context: np.ndarray) 
-> np.ndarray:\n\"\"\"Predict best continuous actions for new data.\n@@ -739,7 +733,7 @@ class QFuncEstimatorForContinuousAction:\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.0001\n+ learning_rate_init: float = 0.001\nmax_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | refactor nnpolicylearner |
641,014 | 03.04.2022 17:19:22 | 14,400 | 10b1f52dd02b1fa3ed2ef3222ed1e18783f404de | fix a bug in nnpolicylearner | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -1082,7 +1082,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nfor x, a, r, p, pos in training_data_loader:\noptimizer.zero_grad()\npi = self.nn_model(x).unsqueeze(-1)\n- policy_grad_arr = -self._estimate_policy_gradient(\n+ policy_grad_arr = self._estimate_policy_gradient(\ncontext=x,\nreward=r,\naction=a,\n@@ -1115,7 +1115,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nself.nn_model.eval()\nfor x, a, r, p, pos in validation_data_loader:\npi = self.nn_model(x).unsqueeze(-1)\n- policy_grad_arr = -self._estimate_policy_gradient(\n+ policy_grad_arr = self._estimate_policy_gradient(\ncontext=x,\nreward=r,\naction=a,\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline_continuous.py",
"new_path": "obp/policy/offline_continuous.py",
"diff": "@@ -487,7 +487,6 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\n).mean()\nloss.backward()\noptimizer.step()\n-\nloss_value = loss.item()\nif previous_training_loss is not None:\nif loss_value - previous_training_loss < self.tol:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix a bug in nnpolicylearner |
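With the sign fixed above, the loss in the refactored learner is assembled as loss = -mean(policy_grad_arr) + policy_reg_param * policy_constraint + var_reg_param * var(policy_grad_arr). A compact toy sketch of one optimization step with that structure follows; the constraint term here is just the log of the mean importance weight and is only a placeholder for `_estimate_policy_constraint`.

    import torch

    n, dim_x, n_actions = 32, 5, 3
    x, a = torch.randn(n, dim_x), torch.randint(n_actions, (n,))
    r, pscore = torch.rand(n), torch.full((n,), 1.0 / n_actions)

    policy = torch.nn.Sequential(torch.nn.Linear(dim_x, n_actions), torch.nn.Softmax(dim=1))
    optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
    policy_reg_param, var_reg_param = 0.1, 0.1

    optimizer.zero_grad()
    pi = policy(x)
    idx = torch.arange(n)
    iw = pi[idx, a].detach() / pscore
    policy_grad_arr = iw * r * torch.log(pi)[idx, a]              # IPW gradient signal
    policy_constraint = torch.log((pi[idx, a] / pscore).mean())   # placeholder constraint
    loss = -policy_grad_arr.mean()
    loss += policy_reg_param * policy_constraint
    loss += var_reg_param * torch.var(policy_grad_arr)
    loss.backward()
    optimizer.step()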
641,014 | 03.04.2022 18:22:41 | 14,400 | 52010ce2ff38ea01082bf69f1387c7dd9a6e5740 | modify default learning_rate_init of nnpolicylearner | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -743,7 +743,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.001\n+ learning_rate_init: float = 0.0001\nmax_iter: int = 200\nshuffle: bool = True\nrandom_state: Optional[int] = None\n@@ -1473,7 +1473,7 @@ class QFuncEstimator:\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.001\n+ learning_rate_init: float = 0.0001\nmax_iter: int = 200\nshuffle: bool = True\nrandom_state: Optional[int] = None\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline_continuous.py",
"new_path": "obp/policy/offline_continuous.py",
"diff": "@@ -157,7 +157,7 @@ class ContinuousNNPolicyLearner(BaseContinuousOfflinePolicyLearner):\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.001\n+ learning_rate_init: float = 0.0001\nmax_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\n@@ -732,7 +732,7 @@ class QFuncEstimatorForContinuousAction:\nsolver: str = \"adam\"\nalpha: float = 0.0001\nbatch_size: Union[int, str] = \"auto\"\n- learning_rate_init: float = 0.001\n+ learning_rate_init: float = 0.0001\nmax_iter: int = 100\nshuffle: bool = True\nrandom_state: Optional[int] = None\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | modify def init_learning_rate of nnpolicylearner |
641,014 | 03.04.2022 18:22:58 | 14,400 | 21b5da2f5fccf9380f5802b1c35bc877f48c1f02 | update example notebooks | [
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/multiclass.ipynb",
"new_path": "examples/quickstart/multiclass.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 1,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 3,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.2\\n\"\n+ \"0.5.3\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/obd.ipynb",
"new_path": "examples/quickstart/obd.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 1,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 3,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.2\\n\"\n+ \"0.5.3\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/online.ipynb",
"new_path": "examples/quickstart/online.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 1,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 3,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.2\\n\"\n+ \"0.5.3\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/synthetic.ipynb",
"new_path": "examples/quickstart/synthetic.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 1,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 3,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.2\\n\"\n+ \"0.5.3\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/synthetic_slate.ipynb",
"new_path": "examples/quickstart/synthetic_slate.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 1,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 3,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 4,\n+ \"execution_count\": 3,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 5,\n+ \"execution_count\": 4,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.2\\n\"\n+ \"0.5.3\\n\"\n]\n}\n],\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 6,\n+ \"execution_count\": 5,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | update example notebooks |
641,014 | 04.04.2022 14:56:53 | 14,400 | 820a5792eb3b842991435c76f204f376a22d90e2 | run with a new version | [
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/multiclass.ipynb",
"new_path": "examples/quickstart/multiclass.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 1,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 3,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.3\\n\"\n+ \"0.5.4\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/obd.ipynb",
"new_path": "examples/quickstart/obd.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 1,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 3,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.3\\n\"\n+ \"0.5.4\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/online.ipynb",
"new_path": "examples/quickstart/online.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 1,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 3,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.3\\n\"\n+ \"0.5.4\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/opl.ipynb",
"new_path": "examples/quickstart/opl.ipynb",
"diff": "\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.3\\n\"\n+ \"0.5.4\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/synthetic.ipynb",
"new_path": "examples/quickstart/synthetic.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 1,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 3,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.3\\n\"\n+ \"0.5.4\\n\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/quickstart/synthetic_slate.ipynb",
"new_path": "examples/quickstart/synthetic_slate.ipynb",
"diff": "},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 1,\n+ \"execution_count\": 2,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 2,\n+ \"execution_count\": 3,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 3,\n+ \"execution_count\": 4,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 4,\n+ \"execution_count\": 5,\n\"metadata\": {},\n\"outputs\": [\n{\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"0.5.3\\n\"\n+ \"0.5.4\\n\"\n]\n}\n],\n},\n{\n\"cell_type\": \"code\",\n- \"execution_count\": 5,\n+ \"execution_count\": 6,\n\"metadata\": {},\n\"outputs\": [],\n\"source\": [\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | run with a new version |
641,014 | 24.04.2022 13:10:22 | 14,400 | 87497eba440d3ae7e0a14a487309acf61ee50e85 | automatic hyperparam sort for slope | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators.py",
"new_path": "obp/ope/estimators.py",
"diff": "@@ -526,11 +526,11 @@ class InverseProbabilityWeighting(BaseOffPolicyEstimator):\n(If only a single action is chosen for each data, you can just ignore this argument.)\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nReturns\n----------\n@@ -1207,11 +1207,11 @@ class DoublyRobust(BaseOffPolicyEstimator):\nEstimated expected rewards given context, action, and position, i.e., :math:`\\\\hat{q}(x_i,a_i)`.\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nReturns\n----------\n@@ -1511,11 +1511,11 @@ class SwitchDoublyRobust(DoublyRobust):\n(If only a single action is chosen for each data, you can just ignore this argument.)\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nReturns\n----------\n@@ -1719,11 +1719,11 @@ class DoublyRobustWithShrinkage(DoublyRobust):\nIndices to differentiate positions in a recommendation interface where the actions are presented.\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nReturns\n----------\n@@ -1907,11 +1907,11 @@ class SubGaussianInverseProbabilityWeighting(InverseProbabilityWeighting):\nIndices to differentiate positions in a recommendation interface where the actions are presented.\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. 
See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nReturns\n----------\n@@ -2106,11 +2106,11 @@ class SubGaussianDoublyRobust(DoublyRobust):\nIndices to differentiate positions in a recommendation interface where the actions are presented.\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nReturns\n----------\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -41,11 +41,11 @@ class BaseOffPolicyEstimatorTuning:\nwhich improves the original SLOPE proposed by Su et al.(2020).\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nuse_estimated_pscore: bool, default=False.\nIf True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.\n@@ -380,6 +380,8 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\nA list of candidate clipping hyperparameters.\nThe automatic hyperparameter tuning procedure proposed by Su et al.(2020)\nor Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+ The candidate hyperparameter values will be sorted automatically to ensure the monotonicity\n+ assumption of SLOPE.\ntuning_method: str, default=\"slope\".\nA method used to tune the hyperparameter of an OPE estimator.\n@@ -388,11 +390,11 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\nwhich improves the original SLOPE proposed by Su et al.(2020).\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nuse_estimated_pscore: bool, default=False.\nIf True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.\n@@ -417,6 +419,7 @@ class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):\nself.base_ope_estimator = InverseProbabilityWeighting\nsuper()._check_lambdas()\nsuper()._check_init_inputs()\n+ self.lambdas.sort(reverse=True)\ndef estimate_policy_value(\nself,\n@@ -583,6 +586,8 @@ class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nA list of candidate clipping hyperparameters.\nThe automatic hyperparameter tuning procedure proposed by Su et al.(2020)\nor Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+ The candidate hyperparameter values will be sorted automatically to ensure the monotonicity\n+ assumption of SLOPE.\ntuning_method: str, default=\"slope\".\nA method used to tune the hyperparameter of an OPE estimator.\n@@ -614,6 +619,7 @@ class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nself.base_ope_estimator = DoublyRobust\nsuper()._check_lambdas()\nsuper()._check_init_inputs()\n+ self.lambdas.sort(reverse=True)\ndef estimate_policy_value(\nself,\n@@ -801,6 +807,8 @@ class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nA list of candidate switching hyperparameters.\nThe automatic hyperparameter tuning procedure proposed by Su et al.(2020)\nor Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+ The candidate hyperparameter values will be sorted automatically to ensure the monotonicity\n+ assumption of SLOPE.\ntuning_method: str, default=\"slope\".\nA method used to tune the 
hyperparameter of an OPE estimator.\n@@ -831,6 +839,7 @@ class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nself.base_ope_estimator = SwitchDoublyRobust\nsuper()._check_lambdas()\nsuper()._check_init_inputs()\n+ self.lambdas.sort(reverse=True)\ndef estimate_policy_value(\nself,\n@@ -1018,6 +1027,8 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\nA list of candidate shrinkage hyperparameters.\nThe automatic hyperparameter tuning procedure proposed by Su et al.(2020)\nor Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+ The candidate hyperparameter values will be sorted automatically to ensure the monotonicity\n+ assumption of SLOPE.\ntuning_method: str, default=\"slope\".\nA method used to tune the hyperparameter of an OPE estimator.\n@@ -1048,6 +1059,7 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\nself.base_ope_estimator = DoublyRobustWithShrinkage\nsuper()._check_lambdas()\nsuper()._check_init_inputs()\n+ self.lambdas.sort(reverse=True)\ndef estimate_policy_value(\nself,\n@@ -1234,6 +1246,8 @@ class SubGaussianInverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning)\nA list of candidate hyperparameter values, which should be in the range of [0.0, 1.0].\nThe automatic hyperparameter tuning procedure proposed by Su et al.(2020)\nor Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+ The candidate hyperparameter values will be sorted automatically to ensure the monotonicity\n+ assumption of SLOPE.\ntuning_method: str, default=\"slope\".\nA method used to tune the hyperparameter of an OPE estimator.\n@@ -1242,11 +1256,11 @@ class SubGaussianInverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning)\nwhich improves the original SLOPE proposed by Su et al.(2020).\nuse_bias_upper_bound: bool, default=True\n- Whether to use bias upper bound in hyperparameter tuning.\n+ Whether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. 
See Su et al.(2020) for details.\ndelta: float, default=0.05\n- A confidence delta to construct a high probability upper bound based on Bernstein inequality.\n+ A confidence delta to construct a high probability upper bound used in SLOPE.\nuse_estimated_pscore: bool, default=False.\nIf True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.\n@@ -1274,6 +1288,7 @@ class SubGaussianInverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning)\nself.base_ope_estimator = SubGaussianInverseProbabilityWeighting\nsuper()._check_lambdas(max_val=1.0)\nsuper()._check_init_inputs()\n+ self.lambdas.sort()\ndef estimate_policy_value(\nself,\n@@ -1437,6 +1452,8 @@ class SubGaussianDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nA list of candidate hyperparameter values, which should be in the range of [0.0, 1.0].\nThe automatic hyperparameter tuning procedure proposed by Su et al.(2020)\nor Tucker and Lee.(2021) will choose the best hyperparameter value from the logged data.\n+ The candidate hyperparameter values will be sorted automatically to ensure the monotonicity\n+ assumption of SLOPE.\ntuning_method: str, default=\"slope\".\nA method used to tune the hyperparameter of an OPE estimator.\n@@ -1470,6 +1487,7 @@ class SubGaussianDoublyRobustTuning(BaseOffPolicyEstimatorTuning):\nself.base_ope_estimator = SubGaussianDoublyRobust\nsuper()._check_lambdas(max_val=1.0)\nsuper()._check_init_inputs()\n+ self.lambdas.sort()\ndef estimate_policy_value(\nself,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | automatic hyperparam sort for slope |
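Note on the sorting added in this commit: SLOPE walks the candidate hyperparameters from the widest to the narrowest confidence interval and stops as soon as a new interval fails to overlap the ones already accepted, so the candidate list has to be ordered so that bias grows monotonically along it (which is what the automatic `self.lambdas.sort(...)` calls enforce). A minimal NumPy sketch of that selection rule, assuming the point estimate and confidence half-width of each candidate have already been computed; the constant C = sqrt(6) - 1 and the overlap test follow the SLOPE papers cited in the docstrings, but the function name and inputs here are illustrative, not obp's internal API:

```python
import numpy as np

def slope_select(lambdas, theta, cnf, C=np.sqrt(6) - 1):
    """Pick a hyperparameter with the SLOPE interval-overlap rule.

    lambdas: candidate hyperparameter values.
    theta:   point estimate of the policy value for each candidate.
    cnf:     high-probability confidence half-width for each candidate.
    (Illustrative inputs; in obp these are computed from the logged data
    inside the tuning classes.)
    """
    theta, cnf = np.asarray(theta, dtype=float), np.asarray(cnf, dtype=float)
    # visit candidates from the widest (most conservative) interval to the narrowest
    order = np.argsort(cnf)[::-1]
    kept_theta, kept_cnf = [], []
    for i, idx in enumerate(order):
        if kept_theta:
            t_j, c_j = np.array(kept_theta), np.array(kept_cnf)
            # stop once the new interval no longer overlaps every accepted one
            if not (np.abs(t_j - theta[idx]) <= cnf[idx] + C * c_j).all():
                return lambdas[order[i - 1]]
        kept_theta.append(theta[idx])
        kept_cnf.append(cnf[idx])
    # every interval overlapped: return the candidate with the narrowest interval
    return lambdas[order[-1]]

# toy usage: the last candidate breaks the overlap chain, so the third one is chosen
print(slope_select([np.inf, 100.0, 10.0, 1.0],
                   theta=[0.30, 0.31, 0.45, 0.60],
                   cnf=[0.20, 0.10, 0.05, 0.02]))  # -> 10.0
```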
641,014 | 24.04.2022 14:33:56 | 14,400 | 9ee8704af78dc73fdbf16d79624d1ef33a57a578 | add some off policy gradient estimators | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -348,7 +348,7 @@ class IPWLearner(BaseOfflinePolicyLearner):\n@dataclass\nclass QLearner(BaseOfflinePolicyLearner):\n- \"\"\"Off-policy learner based on Direct Method.\n+ \"\"\"Off-policy learner based on Direct Method (Reward Regression).\nParameters\n-----------\n@@ -628,21 +628,29 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\ndim_context: int\nNumber of dimensions of context vectors.\n- policy_reg_param: float, default=0.0\n- A hypeparameter to control the policy regularization. :math:`\\\\lambda_{pol}`.\n-\n- var_reg_param: float, default=0.0\n- A hypeparameter to control the variance regularization. :math:`\\\\lambda_{var}`.\n-\noff_policy_objective: str\n- An OPE estimator to estimate the objective function.\n- Must be one of `dm`, `ipw`, and `dr`.\n+ An OPE estimator used to estimate the policy gradient.\n+ Must be one of 'dm', 'ipw', 'dr', 'snipw', 'ipw-os', and 'ipw-subgauss'.\nThey stand for\n- Direct Method\n- Inverse Probability Weighting\n- Doubly Robust\n+ - Self-Normalized Inverse Probability Weighting\n+ - Inverse Probability Weighting with Optimistic Shrinkage\n+ - Inverse Probability Weighting with Sungaussian Weight\n, respectively.\n+ lambda_: float, default=np.inf\n+ A hyperparameter used for 'snipw', 'ipw-os', and 'ipw-subgauss'.\n+ When `off_policy_objective`='snipw', `lambda_` is used to shift the reward.\n+ Otherwise, `lambda_` is used to modify or shrinkage the importance weight.\n+\n+ policy_reg_param: float, default=0.0\n+ A hypeparameter to control the policy regularization. :math:`\\\\lambda_{pol}`.\n+\n+ var_reg_param: float, default=0.0\n+ A hypeparameter to control the variance regularization. :math:`\\\\lambda_{var}`.\n+\nhidden_layer_size: Tuple[int, ...], default = (100,)\nThe i-th element specifies the size of the i-th layer.\n@@ -730,12 +738,22 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n\"Adam: A Method for Stochastic Optimization.\", 2014\nJohn Duchi, Elad Hazan, and Yoram Singer.\n- \"Adaptive Subgradient Methods for Online Learning and Stochastic Optimization\", 2011.\n+ \"Adaptive Subgradient Methods for Online Learning and Stochastic Optimization.\", 2011.\n+\n+ Thorsten Joachims, Adith Swaminathan, and Maarten de Rijke.\n+ \"\"Deep Learning for Logged Bandit Feedback.\"\", 2018.\n+\n+ Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.\n+ \"Doubly Robust Off-Policy Evaluation with Shrinkage.\", 2020.\n+\n+ Alberto Maria Metelli, Alessio Russo, and Marcello Restelli.\n+ \"Subgaussian and Differentiable Importance Sampling for Off-Policy Evaluation and Learning.\", 2021.\n\"\"\"\ndim_context: Optional[int] = None\noff_policy_objective: Optional[str] = None\n+ lambda_: Optional[float] = None\npolicy_reg_param: float = 0.0\nvar_reg_param: float = 0.0\nhidden_layer_size: Tuple[int, ...] 
= (100,)\n@@ -764,12 +782,50 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\ncheck_scalar(self.dim_context, \"dim_context\", int, min_val=1)\n- if self.off_policy_objective not in [\"dm\", \"ipw\", \"dr\"]:\n+ if self.off_policy_objective not in [\n+ \"dm\",\n+ \"ipw\",\n+ \"dr\",\n+ \"snipw\",\n+ \"ipw-os\",\n+ \"ipw-subgauss\",\n+ ]:\nraise ValueError(\n- \"`off_policy_objective` must be one of 'dm', 'ipw', or 'dr'\"\n+ \"`off_policy_objective` must be one of 'dm', 'ipw', 'dr', 'snipw', 'ipw-os', 'ipw-subgauss'\"\nf\", but {self.off_policy_objective} is given\"\n)\n+ if self.off_policy_objective == \"ipw-subgauss\":\n+ check_scalar(\n+ self.lambda_,\n+ \"lambda_\",\n+ (int, float),\n+ min_val=0.0,\n+ max_val=1.0,\n+ )\n+ if self.lambda_ is None:\n+ self.lambda_ = 0.01\n+\n+ elif self.off_policy_objective == \"snipw\":\n+ check_scalar(\n+ self.lambda_,\n+ \"lambda_\",\n+ (int, float),\n+ min_val=0.0,\n+ )\n+ if self.lambda_ is None:\n+ self.lambda_ = 0.0\n+\n+ elif self.off_policy_objective == \"ipw-os\":\n+ check_scalar(\n+ self.lambda_,\n+ \"lambda_\",\n+ (int, float),\n+ min_val=0.0,\n+ )\n+ if self.lambda_ is None:\n+ self.lambda_ = 10000\n+\ncheck_scalar(\nself.policy_reg_param,\n\"policy_reg_param\",\n@@ -1200,6 +1256,23 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nestimated_policy_grad_arr *= log_prob[idx_tensor, action]\nestimated_policy_grad_arr += torch.sum(q_hat * current_pi * log_prob, dim=1)\n+ elif self.off_policy_objective == \"snipw\":\n+ iw = current_pi[idx_tensor, action] / pscore\n+ estimated_policy_grad_arr = iw * (reward - self.lambda_)\n+ estimated_policy_grad_arr *= log_prob[idx_tensor, action]\n+\n+ elif self.off_policy_objective == \"ipw-os\":\n+ iw = current_pi[idx_tensor, action] / pscore\n+ iw_ = (self.lambda_ * iw) / (iw**2 + self.lambda_)\n+ estimated_policy_grad_arr = iw_ * reward\n+ estimated_policy_grad_arr *= log_prob[idx_tensor, action]\n+\n+ elif self.off_policy_objective == \"ipw-subgauss\":\n+ iw = current_pi[idx_tensor, action] / pscore\n+ iw_ = iw / (1 - self.lambda_ + self.lambda_ * iw)\n+ estimated_policy_grad_arr = iw_ * reward\n+ estimated_policy_grad_arr *= log_prob[idx_tensor, action]\n+\nreturn estimated_policy_grad_arr\ndef _estimate_policy_constraint(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/policy/test_offline.py",
"new_path": "tests/policy/test_offline.py",
"diff": "@@ -443,6 +443,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -471,6 +472,7 @@ invalid_input_of_nn_policy_learner_init = [\n-1, #\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -499,6 +501,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n-1, #\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -527,6 +530,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\nNone, #\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -548,13 +552,14 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\nValueError,\n- \"`off_policy_objective` must be one of 'dm', 'ipw', or 'dr'\",\n+ \"`off_policy_objective` must be one of 'dm', 'ipw', 'dr'\",\n),\n(\n10,\n1,\n2,\n\"dros\", #\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -576,13 +581,159 @@ invalid_input_of_nn_policy_learner_init = [\n1e-8,\n10,\nValueError,\n- \"`off_policy_objective` must be one of 'dm', 'ipw', or 'dr'\",\n+ \"`off_policy_objective` must be one of 'dm', 'ipw', 'dr'\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"snipw\",\n+ -1.0, #\n+ 0.1,\n+ 0.1,\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ ValueError,\n+ \"lambda_ == -1.0, must be >= 0.\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"ipw-os\",\n+ -1.0, #\n+ 0.1,\n+ 0.1,\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ ValueError,\n+ \"lambda_ == -1.0, must be >= 0.\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"ipw-subgauss\",\n+ -1.0, #\n+ 0.1,\n+ 0.1,\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ ValueError,\n+ \"lambda_ == -1.0, must be >= 0.\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"ipw-subgauss\",\n+ 2.0, #\n+ 0.1,\n+ 0.1,\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ ValueError,\n+ \"lambda_ == 2.0, must be <= 1.\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"ipw-subgauss\",\n+ \"-1.0\", #\n+ 0.1,\n+ 0.1,\n+ (100, 50, 100),\n+ \"relu\",\n+ \"adam\",\n+ 0.001,\n+ \"auto\",\n+ 0.0001,\n+ 200,\n+ True,\n+ 123,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ TypeError,\n+ r\"lambda_ must be an instance of \\(<class 'int'>, <class 'float'>\\), not <class 'str'>.\",\n),\n(\n10,\n1,\n2,\n\"dr\",\n+ 0.0,\n\"\", #\n0.1,\n(100, 50, 100),\n@@ -611,6 +762,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"dr\",\n+ 0.0,\nNone, #\n0.1,\n(100, 50, 100),\n@@ -639,6 +791,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"dr\",\n+ 0.0,\n-1.0, #\n0.1,\n(100, 50, 100),\n@@ -667,6 +820,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"dr\",\n+ 0.0,\n0.1,\n\"\", #\n(100, 50, 100),\n@@ -695,6 +849,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"dr\",\n+ 0.0,\n0.1,\nNone, #\n(100, 50, 100),\n@@ -723,6 +878,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"dr\",\n+ 0.0,\n0.1,\n-1.0, #\n(100, 50, 100),\n@@ -751,6 +907,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, \"\"), #\n@@ -779,6 +936,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -807,6 
+965,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -835,6 +994,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -863,6 +1023,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -891,6 +1052,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -919,6 +1081,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -947,6 +1110,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -975,6 +1139,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1003,6 +1168,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1031,6 +1197,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1059,6 +1226,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1087,6 +1255,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1115,6 +1284,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1143,6 +1313,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1171,6 +1342,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1199,6 +1371,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1227,6 +1400,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1255,6 +1429,7 @@ invalid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1286,6 +1461,7 @@ valid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"ipw\",\n+ 0.0,\n0.1,\n0.1,\n(100, 50, 100),\n@@ -1313,9 +1489,94 @@ valid_input_of_nn_policy_learner_init = [\n1,\n2,\n\"dr\",\n+ 0.0,\n0.1,\n0.1,\n- (100, 50, 100),\n+ (100,),\n+ \"logistic\",\n+ \"sgd\",\n+ 0.001,\n+ 50,\n+ 0.0001,\n+ 200,\n+ True,\n+ None,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ \"valid input\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"snipw\",\n+ 1.0,\n+ 0.1,\n+ 0.1,\n+ (100,),\n+ \"logistic\",\n+ \"sgd\",\n+ 0.001,\n+ 50,\n+ 0.0001,\n+ 200,\n+ True,\n+ None,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ \"valid input\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"ipw-os\",\n+ 100,\n+ 0.1,\n+ 0.1,\n+ (100,),\n+ \"logistic\",\n+ \"sgd\",\n+ 0.001,\n+ 50,\n+ 0.0001,\n+ 200,\n+ True,\n+ None,\n+ 1e-4,\n+ 0.9,\n+ True,\n+ True,\n+ 0.1,\n+ 0.9,\n+ 0.999,\n+ 1e-8,\n+ 10,\n+ \"valid input\",\n+ ),\n+ (\n+ 10,\n+ 1,\n+ 2,\n+ \"ipw-subgauss\",\n+ 0.5,\n+ 0.1,\n+ 0.1,\n+ (100,),\n\"logistic\",\n\"sgd\",\n0.001,\n@@ -1339,7 +1600,7 @@ valid_input_of_nn_policy_learner_init = [\[email protected](\n- \"n_actions, len_list, dim_context, off_policy_objective, policy_reg_param, var_reg_param, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, err, description\",\n+ 
\"n_actions, len_list, dim_context, off_policy_objective, lambda_, policy_reg_param, var_reg_param, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, err, description\",\ninvalid_input_of_nn_policy_learner_init,\n)\ndef test_nn_policy_learner_init_using_invalid_inputs(\n@@ -1347,6 +1608,7 @@ def test_nn_policy_learner_init_using_invalid_inputs(\nlen_list,\ndim_context,\noff_policy_objective,\n+ lambda_,\npolicy_reg_param,\nvar_reg_param,\nhidden_layer_size,\n@@ -1376,6 +1638,7 @@ def test_nn_policy_learner_init_using_invalid_inputs(\nlen_list=len_list,\ndim_context=dim_context,\noff_policy_objective=off_policy_objective,\n+ lambda_=lambda_,\npolicy_reg_param=policy_reg_param,\nvar_reg_param=var_reg_param,\nhidden_layer_size=hidden_layer_size,\n@@ -1400,7 +1663,7 @@ def test_nn_policy_learner_init_using_invalid_inputs(\[email protected](\n- \"n_actions, len_list, dim_context, off_policy_objective, policy_reg_param, var_reg_param, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, description\",\n+ \"n_actions, len_list, dim_context, off_policy_objective, lambda_, policy_reg_param, var_reg_param, hidden_layer_size, activation, solver, alpha, batch_size, learning_rate_init, max_iter, shuffle, random_state, tol, momentum, nesterovs_momentum, early_stopping, validation_fraction, beta_1, beta_2, epsilon, n_iter_no_change, description\",\nvalid_input_of_nn_policy_learner_init,\n)\ndef test_nn_policy_learner_init_using_valid_inputs(\n@@ -1408,6 +1671,7 @@ def test_nn_policy_learner_init_using_valid_inputs(\nlen_list,\ndim_context,\noff_policy_objective,\n+ lambda_,\npolicy_reg_param,\nvar_reg_param,\nhidden_layer_size,\n@@ -1435,6 +1699,7 @@ def test_nn_policy_learner_init_using_valid_inputs(\nlen_list=len_list,\ndim_context=dim_context,\noff_policy_objective=off_policy_objective,\n+ lambda_=lambda_,\npolicy_reg_param=policy_reg_param,\nvar_reg_param=var_reg_param,\nhidden_layer_size=hidden_layer_size,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | add some off policy gradient estimators |
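For context, the new `off_policy_objective` options plug different importance-weight transformations into the policy gradient of `NNPolicyLearner`, with `lambda_` controlling the reward shift ('snipw') or the weight shrinkage ('ipw-os', 'ipw-subgauss'). A rough usage sketch, assuming the learner's existing fit/predict_proba interface is unchanged by this commit; the hyperparameter values are placeholders, not recommendations:

```python
from obp.dataset import SyntheticBanditDataset, logistic_reward_function
from obp.policy import NNPolicyLearner

# synthetic logged bandit data to train on
dataset = SyntheticBanditDataset(
    n_actions=10,
    dim_context=5,
    reward_function=logistic_reward_function,
    random_state=12345,
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=1000)

# gradient based on the subgaussian importance weight; lambda_ must be in [0, 1]
nn_learner = NNPolicyLearner(
    n_actions=dataset.n_actions,
    dim_context=5,
    off_policy_objective="ipw-subgauss",
    lambda_=0.01,
    random_state=12345,
)
nn_learner.fit(
    context=bandit_feedback["context"],
    action=bandit_feedback["action"],
    reward=bandit_feedback["reward"],
    pscore=bandit_feedback["pscore"],
)
action_dist = nn_learner.predict_proba(context=bandit_feedback["context"])
```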
641,014 | 24.04.2022 21:26:48 | 14,400 | 437ab5cce135f2fb0b7f6608c6909cbe4eef8416 | fix the automatic hyperparam sorting of dr-os | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_tuning.py",
"new_path": "obp/ope/estimators_tuning.py",
"diff": "@@ -44,7 +44,7 @@ class BaseOffPolicyEstimatorTuning:\nWhether to use a bias upper bound in hyperparameter tuning.\nIf False, the direct bias estimator is used to estimate the MSE. See Su et al.(2020) for details.\n- delta: float, default=0.05\n+ delta: float, default=0.1\nA confidence delta to construct a high probability upper bound used in SLOPE.\nuse_estimated_pscore: bool, default=False.\n@@ -70,7 +70,7 @@ class BaseOffPolicyEstimatorTuning:\nlambdas: List[float] = None\ntuning_method: str = \"slope\"\nuse_bias_upper_bound: bool = True\n- delta: float = 0.05\n+ delta: float = 0.1\nuse_estimated_pscore: bool = False\ndef __new__(cls, *args, **kwargs):\n@@ -151,7 +151,6 @@ class BaseOffPolicyEstimatorTuning:\n) -> float:\n\"\"\"Find the best hyperparameter value from the candidate set by SLOPE.\"\"\"\nC = np.sqrt(6) - 1\n- theta_list, cnf_list = [], []\ntheta_list_for_sort, cnf_list_for_sort = [], []\nfor hyperparam_ in self.lambdas:\nestimated_round_rewards = self.base_ope_estimator(\n@@ -172,6 +171,7 @@ class BaseOffPolicyEstimatorTuning:\n)\ncnf_list_for_sort.append(cnf)\n+ theta_list, cnf_list = [], []\nsorted_idx_list = np.argsort(cnf_list_for_sort)[::-1]\nfor i, idx in enumerate(sorted_idx_list):\ncnf_i = cnf_list_for_sort[idx]\n@@ -1059,7 +1059,7 @@ class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):\nself.base_ope_estimator = DoublyRobustWithShrinkage\nsuper()._check_lambdas()\nsuper()._check_init_inputs()\n- self.lambdas.sort(reverse=True)\n+ self.lambdas.sort()\ndef estimate_policy_value(\nself,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix the automatic hyperparam sorting of dr-os |
641,014 | 24.04.2022 22:35:23 | 14,400 | 02493699e0cdf27963a76d8110d1df6c6b0389f5 | fix added policy gradients | [
{
"change_type": "MODIFY",
"old_path": "obp/policy/offline.py",
"new_path": "obp/policy/offline.py",
"diff": "@@ -796,6 +796,8 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\n)\nif self.off_policy_objective == \"ipw-subgauss\":\n+ if self.lambda_ is None:\n+ self.lambda_ = 0.001\ncheck_scalar(\nself.lambda_,\n\"lambda_\",\n@@ -803,28 +805,26 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nmin_val=0.0,\nmax_val=1.0,\n)\n- if self.lambda_ is None:\n- self.lambda_ = 0.01\nelif self.off_policy_objective == \"snipw\":\n+ if self.lambda_ is None:\n+ self.lambda_ = 0.0\ncheck_scalar(\nself.lambda_,\n\"lambda_\",\n(int, float),\nmin_val=0.0,\n)\n- if self.lambda_ is None:\n- self.lambda_ = 0.0\nelif self.off_policy_objective == \"ipw-os\":\n+ if self.lambda_ is None:\n+ self.lambda_ = 10000\ncheck_scalar(\nself.lambda_,\n\"lambda_\",\n(int, float),\nmin_val=0.0,\n)\n- if self.lambda_ is None:\n- self.lambda_ = 10000\ncheck_scalar(\nself.policy_reg_param,\n@@ -939,7 +939,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nself.nn_model = nn.Sequential(OrderedDict(layer_list))\n- if self.off_policy_objective != \"ipw\":\n+ if self.off_policy_objective in [\"dr\", \"dm\"]:\nif self.q_func_estimator_hyperparams is not None:\nself.q_func_estimator_hyperparams[\"n_actions\"] = self.n_actions\nself.q_func_estimator_hyperparams[\"dim_context\"] = self.dim_context\n@@ -1090,7 +1090,7 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nposition = np.zeros_like(action, dtype=int)\n# train q function estimator when it is needed to train NNPolicy\n- if self.off_policy_objective != \"ipw\":\n+ if self.off_policy_objective in [\"dr\", \"dm\"]:\nself.q_func_estimator.fit(\ncontext=context,\naction=action,\n@@ -1263,13 +1263,15 @@ class NNPolicyLearner(BaseOfflinePolicyLearner):\nelif self.off_policy_objective == \"ipw-os\":\niw = current_pi[idx_tensor, action] / pscore\n- iw_ = (self.lambda_ * iw) / (iw**2 + self.lambda_)\n+ iw_ = (self.lambda_ - (iw**2)) / ((iw**2 + self.lambda_) ** 2)\n+ iw_ *= self.lambda_ * iw\nestimated_policy_grad_arr = iw_ * reward\nestimated_policy_grad_arr *= log_prob[idx_tensor, action]\nelif self.off_policy_objective == \"ipw-subgauss\":\niw = current_pi[idx_tensor, action] / pscore\n- iw_ = iw / (1 - self.lambda_ + self.lambda_ * iw)\n+ iw_ = (1 - self.lambda_) * iw\n+ iw_ /= (1 - self.lambda_ + self.lambda_ * iw) ** 2\nestimated_policy_grad_arr = iw_ * reward\nestimated_policy_grad_arr *= log_prob[idx_tensor, action]\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix added policy gradients |
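The corrected branches above are consistent with differentiating through the weight transformation itself: because the importance weight w = pi_theta(a|x) / pi_b(a|x) depends on the policy parameters, the chain rule turns the transformed objective into a coefficient w * g'(w) * r on grad log pi(a|x), where g(w) = lambda*w / (w^2 + lambda) for 'ipw-os' and g(w) = w / (1 - lambda + lambda*w) for 'ipw-subgauss'. A small NumPy sketch of the per-round coefficients as they stand after this fix (the standalone function and its names are illustrative only):

```python
import numpy as np

def policy_grad_coefficient(iw, reward, objective, lambda_):
    """Per-round coefficient multiplying grad log pi(a|x) in the policy gradient.

    iw: importance weights pi_theta(a|x) / pi_b(a|x) of the logged actions.
    The formulas mirror the branches in NNPolicyLearner after this fix.
    """
    iw = np.asarray(iw, dtype=float)
    reward = np.asarray(reward, dtype=float)
    if objective == "ipw":
        return iw * reward
    if objective == "snipw":
        # lambda_ acts as a reward shift (baseline), which can reduce variance
        return iw * (reward - lambda_)
    if objective == "ipw-os":
        # chain rule through the optimistic-shrinkage weight lambda*w / (w^2 + lambda)
        shrunk_grad = lambda_ * (lambda_ - iw**2) / (iw**2 + lambda_) ** 2
        return iw * shrunk_grad * reward
    if objective == "ipw-subgauss":
        # chain rule through the subgaussian weight w / (1 - lambda + lambda*w)
        sg_grad = (1.0 - lambda_) / (1.0 - lambda_ + lambda_ * iw) ** 2
        return iw * sg_grad * reward
    raise ValueError(f"unknown objective: {objective}")

iw = np.array([0.5, 1.0, 4.0])
r = np.array([1.0, 0.0, 1.0])
print(policy_grad_coefficient(iw, r, "ipw-os", lambda_=10.0))
```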
641,014 | 28.04.2022 16:59:32 | 14,400 | 7f2ab7857ad25f9f9a20d302369bd0b3e95af397 | fix error checking on p_e_a | [

{
"change_type": "MODIFY",
"old_path": "obp/ope/meta.py",
"new_path": "obp/ope/meta.py",
"diff": "@@ -149,15 +149,26 @@ class OffPolicyEvaluation:\npass\nelif isinstance(value_or_dict, dict):\nfor estimator_name, value in value_or_dict.items():\n+ expected_dim = 1\n+ if var_name in [\"p_e_a\", \"pi_b\"]:\n+ expected_dim = 3\n+ elif var_name in [\"action_embed\"]:\n+ expected_dim = 2\ncheck_array(\narray=value,\nname=f\"{var_name}[{estimator_name}]\",\n- expected_dim=1,\n+ expected_dim=expected_dim,\n)\n+ if var_name != \"p_e_a\":\nif value.shape[0] != action_dist.shape[0]:\nraise ValueError(\nf\"Expected `{var_name}[{estimator_name}].shape[0] == action_dist.shape[0]`, but found it False\"\n)\n+ else:\n+ if value.shape[0] != action_dist.shape[1]:\n+ raise ValueError(\n+ f\"Expected `{var_name}[{estimator_name}].shape[0] == action_dist.shape[1]`, but found it False\"\n+ )\nelse:\nexpected_dim = 1\nif var_name in [\"p_e_a\", \"pi_b\"]:\n@@ -172,6 +183,11 @@ class OffPolicyEvaluation:\nraise ValueError(\nf\"Expected `{var_name}.shape[0] == action_dist.shape[0]`, but found it False\"\n)\n+ else:\n+ if value.shape[0] != action_dist.shape[1]:\n+ raise ValueError(\n+ f\"Expected `{var_name}[{estimator_name}].shape[0] == action_dist.shape[1]`, but found it False\"\n+ )\nestimator_inputs = {\nestimator_name: {\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix error checkings on p_e_a |
641,011 | 10.05.2022 10:15:46 | -32,400 | cb80f49fd448dfe85e29dcfead848c93a404d879 | fix expected reward factual in independent case | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_slate.py",
"new_path": "obp/dataset/synthetic_slate.py",
"diff": "@@ -1441,7 +1441,7 @@ def action_interaction_reward_function(\naction_2d[:, pos_],\n]\nif reward_structure == \"independent\":\n- continue\n+ pass\nelif is_additive:\nfor pos2_ in np.arange(len_list):\nif is_cascade:\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix expected reward factual in indepedent case |
641,014 | 15.06.2022 12:43:03 | -32,400 | 83f3945f3060b59dd2ca1f2995f8eadb0bacb992 | allow slope to use the true marginal importance weight for mips | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_embed.py",
"new_path": "obp/ope/estimators_embed.py",
"diff": "@@ -303,6 +303,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nposition=position,\npi_b=pi_b,\naction_dist=action_dist,\n+ p_e_a=p_e_a,\n)\nelif self.embedding_selection_method == \"greedy\":\nreturn self._estimate_with_greedy_pruning(\n@@ -313,6 +314,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nposition=position,\npi_b=pi_b,\naction_dist=action_dist,\n+ p_e_a=p_e_a,\n)\nelse:\nreturn self._estimate_round_rewards(\n@@ -335,6 +337,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b: np.ndarray,\naction_dist: np.ndarray,\nposition: np.ndarray,\n+ p_e_a: Optional[np.ndarray] = None,\n) -> float:\n\"\"\"Apply an exact version of data-drive action embedding selection.\"\"\"\nn_emb_dim = action_embed.shape[1]\n@@ -352,6 +355,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b=pi_b,\naction_dist=action_dist,\nposition=position,\n+ p_e_a=p_e_a[:, :, comb],\nwith_dev=True,\n)\nif len(theta_list) > 0:\n@@ -380,6 +384,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b: np.ndarray,\naction_dist: np.ndarray,\nposition: np.ndarray,\n+ p_e_a: Optional[np.ndarray] = None,\n) -> float:\n\"\"\"Apply a greedy version of data-drive action embedding selection.\"\"\"\nn_emb_dim = action_embed.shape[1]\n@@ -395,6 +400,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b=pi_b,\naction_dist=action_dist,\nposition=position,\n+ p_e_a=p_e_a[:, :, current_feat],\nwith_dev=True,\n)\ntheta_list.append(theta), cnf_list.append(cnf)\n@@ -413,6 +419,7 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\npi_b=pi_b,\naction_dist=action_dist,\nposition=position,\n+ p_e_a=p_e_a[:, :, candidate_feat],\nwith_dev=True,\n)\nd_list_.append(d)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | allow slope to use the true maginal importance weight for mips |
641,014 | 15.06.2022 13:22:16 | -32,400 | 0e94113752859305765e1ceebbc096ca6da8873d | fix a bug in slope in mips | [
{
"change_type": "MODIFY",
"old_path": "obp/ope/estimators_embed.py",
"new_path": "obp/ope/estimators_embed.py",
"diff": "@@ -347,6 +347,18 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\ncomb_list = list(itertools.combinations(feat_list, i))\ntheta_list_, cnf_list_ = [], []\nfor comb in comb_list:\n+ if p_e_a is None:\n+ theta, cnf = self._estimate_round_rewards(\n+ context=context,\n+ reward=reward,\n+ action=action,\n+ action_embed=action_embed[:, comb],\n+ pi_b=pi_b,\n+ action_dist=action_dist,\n+ position=position,\n+ with_dev=True,\n+ )\n+ else:\ntheta, cnf = self._estimate_round_rewards(\ncontext=context,\nreward=reward,\n@@ -392,6 +404,18 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\ncurrent_feat, C = np.arange(n_emb_dim), np.sqrt(6) - 1\n# init\n+ if p_e_a is None:\n+ theta, cnf = self._estimate_round_rewards(\n+ context=context,\n+ reward=reward,\n+ action=action,\n+ action_embed=action_embed[:, current_feat],\n+ pi_b=pi_b,\n+ action_dist=action_dist,\n+ position=position,\n+ with_dev=True,\n+ )\n+ else:\ntheta, cnf = self._estimate_round_rewards(\ncontext=context,\nreward=reward,\n@@ -411,6 +435,18 @@ class MarginalizedInverseProbabilityWeighting(BaseOffPolicyEstimator):\nfor d in current_feat:\nidx_without_d = np.where(current_feat != d, True, False)\ncandidate_feat = current_feat[idx_without_d]\n+ if p_e_a is None:\n+ theta, cnf = self._estimate_round_rewards(\n+ context=context,\n+ reward=reward,\n+ action=action,\n+ action_embed=action_embed[:, candidate_feat],\n+ pi_b=pi_b,\n+ action_dist=action_dist,\n+ position=position,\n+ with_dev=True,\n+ )\n+ else:\ntheta, cnf = self._estimate_round_rewards(\ncontext=context,\nreward=reward,\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | fix a bug in slope in mips |
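Both of these commits are about letting the embedding-selection (SLOPE) step of the marginalized IPW estimator reuse the true conditional distribution `p_e_a`, i.e. p(e|a), when it is provided, while still falling back to data-driven estimation when it is None. As a reference point, a sketch of how the marginal importance weight over action embeddings is defined when p(e|a) is known, assuming the embedding dimensions are conditionally independent given the action and the array convention `p_e_a[action, category, dimension]`; this is a simplified illustration, not obp's internal implementation:

```python
import numpy as np

def marginal_importance_weights(action_embed, pi_b, pi_e, p_e_a):
    """Marginal importance weight w(x, e) = p(e | pi_e, x) / p(e | pi_b, x).

    action_embed: (n_rounds, n_dim) observed embedding categories.
    pi_b, pi_e:   (n_rounds, n_actions) action-choice probabilities.
    p_e_a:        (n_actions, n_cat, n_dim) conditional distribution of each
                  embedding dimension given the action.
    """
    n_rounds, n_dim = action_embed.shape
    n_actions = p_e_a.shape[0]
    # p(e_i | a) for every action a, multiplying over the selected dimensions
    p_e_given_a = np.ones((n_rounds, n_actions))
    for d in range(n_dim):
        p_e_given_a *= p_e_a[:, action_embed[:, d], d].T
    # marginalize over actions under each policy, then take the ratio
    p_e_under_pi_e = (p_e_given_a * pi_e).sum(axis=1)
    p_e_under_pi_b = (p_e_given_a * pi_b).sum(axis=1)
    return p_e_under_pi_e / p_e_under_pi_b

# tiny check: two actions with identical embedding distributions make the
# weight exactly 1 regardless of the behavior and evaluation policies
rng = np.random.default_rng(0)
p_e_a = np.tile(rng.dirichlet(np.ones(3)), (2, 1))[:, :, np.newaxis]  # (2, 3, 1)
emb = rng.integers(0, 3, size=(5, 1))
pi_b = rng.dirichlet(np.ones(2), size=5)
pi_e = rng.dirichlet(np.ones(2), size=5)
print(marginal_importance_weights(emb, pi_b, pi_e, p_e_a))  # ~[1. 1. 1. 1. 1.]
```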
641,008 | 29.06.2022 16:36:51 | -7,200 | f7918ad8d4641ee2dd1a99050c9751cb68852641 | Add factual rewards to synthetic data generation and enable online bandit simulations | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -237,14 +237,16 @@ class SyntheticBanditDataset(BaseBanditDataset):\ndef sample_reward_given_expected_reward(\nself,\nexpected_reward: np.ndarray,\n- action: np.ndarray,\n+ action: Optional[np.ndarray] = None,\n) -> np.ndarray:\n+ if action is not None:\n+ expected_reward = expected_reward[np.arange(action.shape[0]), action]\n+\n\"\"\"Sample reward given expected rewards\"\"\"\n- expected_reward_factual = expected_reward[np.arange(action.shape[0]), action]\nif RewardType(self.reward_type) == RewardType.BINARY:\n- reward = self.random_.binomial(n=1, p=expected_reward_factual)\n+ reward = self.random_.binomial(n=1, p=expected_reward)\nelif RewardType(self.reward_type) == RewardType.CONTINUOUS:\n- mean = expected_reward_factual\n+ mean = expected_reward\na = (self.reward_min - mean) / self.reward_std\nb = (self.reward_max - mean) / self.reward_std\nreward = truncnorm.rvs(\n@@ -346,7 +348,8 @@ class SyntheticBanditDataset(BaseBanditDataset):\nactions = sample_action_fast(pi_b, random_state=self.random_state)\n# sample rewards based on the context and action\n- rewards = self.sample_reward_given_expected_reward(expected_reward_, actions)\n+ factual_reward = self.sample_reward_given_expected_reward(expected_reward_)\n+ rewards = factual_reward[np.arange(actions.shape[0]), actions]\nreturn dict(\nn_rounds=n_rounds,\n@@ -356,6 +359,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\naction=actions,\nposition=None, # position effect is not considered in synthetic data\nreward=rewards,\n+ factual_reward=factual_reward,\nexpected_reward=expected_reward_,\npi_b=pi_b[:, :, np.newaxis],\npscore=pi_b[np.arange(n_rounds), actions],\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic_multi.py",
"new_path": "obp/dataset/synthetic_multi.py",
"diff": "@@ -305,7 +305,8 @@ class SyntheticMultiLoggersBanditDataset(SyntheticBanditDataset):\npi_b_avg += rho * softmax(beta * pi_b_logits)\n# sample rewards based on the context and action\n- rewards = self.sample_reward_given_expected_reward(expected_reward_, actions)\n+ factual_reward = self.sample_reward_given_expected_reward(expected_reward_)\n+ rewards = factual_reward[np.arange(actions.shape[0]), actions]\nreturn dict(\nn_rounds=n_rounds,\n@@ -316,6 +317,7 @@ class SyntheticMultiLoggersBanditDataset(SyntheticBanditDataset):\naction=actions,\nposition=None, # position effect is not considered in synthetic data\nreward=rewards,\n+ factual_reward=factual_reward,\nexpected_reward=expected_reward_,\nstratum_idx=stratum_idx,\npi_b=pi_b[:, :, np.newaxis],\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "@@ -40,30 +40,29 @@ def run_bandit_simulation(\nAction choice probabilities (can be deterministic).\n\"\"\"\n- for key_ in [\"action\", \"position\", \"reward\", \"pscore\", \"context\"]:\n+ for key_ in [\"position\", \"reward\", \"factual_reward\", \"pscore\", \"context\"]:\nif key_ not in bandit_feedback:\nraise RuntimeError(f\"Missing key of {key_} in 'bandit_feedback'.\")\ncheck_bandit_feedback_inputs(\ncontext=bandit_feedback[\"context\"],\naction=bandit_feedback[\"action\"],\nreward=bandit_feedback[\"reward\"],\n+ factual_reward=bandit_feedback[\"factual_reward\"],\nposition=bandit_feedback[\"position\"],\npscore=bandit_feedback[\"pscore\"],\n)\npolicy_ = policy\nselected_actions_list = list()\n- dim_context = bandit_feedback[\"context\"].shape[1]\nif bandit_feedback[\"position\"] is None:\nbandit_feedback[\"position\"] = np.zeros_like(\nbandit_feedback[\"action\"], dtype=int\n)\n- for action_, reward_, position_, context_ in tqdm(\n+ for position_, context_, factual_reward in tqdm(\nzip(\n- bandit_feedback[\"action\"],\n- bandit_feedback[\"reward\"],\nbandit_feedback[\"position\"],\nbandit_feedback[\"context\"],\n+ bandit_feedback[\"factual_reward\"],\n),\ntotal=bandit_feedback[\"n_rounds\"],\n):\n@@ -72,28 +71,54 @@ def run_bandit_simulation(\nif policy_.policy_type == PolicyType.CONTEXT_FREE:\nselected_actions = policy_.select_action()\nelif policy_.policy_type == PolicyType.CONTEXTUAL:\n- selected_actions = policy_.select_action(context_.reshape(1, dim_context))\n- action_match_ = action_ == selected_actions[position_]\n- # update parameters of a bandit policy\n- # only when selected actions&positions are equal to logged actions&positions\n- if action_match_:\n- if policy_.policy_type == PolicyType.CONTEXT_FREE:\n- policy_.update_params(action=action_, reward=reward_)\n- elif policy_.policy_type == PolicyType.CONTEXTUAL:\n- policy_.update_params(\n- action=action_,\n- reward=reward_,\n- context=context_.reshape(1, dim_context),\n+ selected_actions = policy_.select_action(np.expand_dims(context_, axis=0))\n+ else:\n+ raise RuntimeError(\n+ f\"Policy type {policy_.policy_type} of policy {policy_.policy_name} is unsupported\"\n)\n+\n+ action_ = selected_actions[position_]\n+ reward_ = factual_reward[action_]\n+\n+ update_policy(policy_, context_, action_, reward_)\nselected_actions_list.append(selected_actions)\naction_dist = convert_to_action_dist(\n- n_actions=bandit_feedback[\"action\"].max() + 1,\n+ n_actions=policy.n_actions,\nselected_actions=np.array(selected_actions_list),\n)\nreturn action_dist\n+def update_policy(\n+ policy: BanditPolicy, context: np.ndarray, action: int, reward: int\n+) -> None:\n+ \"\"\"Run an online bandit algorithm on the given logged bandit feedback data.\n+\n+ Parameters\n+ ----------\n+ policy: BanditPolicy\n+ Online bandit policy to be updated.\n+\n+ context: np.ndarray\n+ Context in which the policy observed the reward\n+\n+ action: int\n+ Action taken by the policy as defined by the `policy` argument\n+\n+ reward: int\n+ Reward observed by the policy as defined by the `policy` argument\n+ \"\"\"\n+ if policy.policy_type == PolicyType.CONTEXT_FREE:\n+ policy.update_params(action=action, reward=reward)\n+ elif policy.policy_type == PolicyType.CONTEXTUAL:\n+ policy.update_params(\n+ action=action,\n+ reward=reward,\n+ context=np.expand_dims(context, axis=0),\n+ )\n+\n+\ndef calc_ground_truth_policy_value(\nbandit_feedback: BanditFeedback,\nreward_sampler: Callable[[np.ndarray, np.ndarray], float],\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/utils.py",
"new_path": "obp/utils.py",
"diff": "@@ -210,10 +210,11 @@ def check_bandit_feedback_inputs(\naction: np.ndarray,\nreward: np.ndarray,\nexpected_reward: Optional[np.ndarray] = None,\n+ factual_reward: Optional[np.ndarray] = None,\nposition: Optional[np.ndarray] = None,\npscore: Optional[np.ndarray] = None,\naction_context: Optional[np.ndarray] = None,\n-) -> Optional[ValueError]:\n+) -> None:\n\"\"\"Check inputs for bandit learning or simulation.\nParameters\n@@ -230,6 +231,9 @@ def check_bandit_feedback_inputs(\nexpected_reward: array-like, shape (n_rounds, n_actions), default=None\nExpected reward of each data, i.e., :math:`\\\\mathbb{E}[r_i|x_i,a_i]`.\n+ factual_reward: array-like, shape (n_rounds, n_actions), default=None\n+ Full information rewards for each action sampled from the `expected_reward`.\n+\nposition: array-like, shape (n_rounds,), default=None\nIndices to differentiate positions in a recommendation interface where the actions are presented.\n@@ -266,6 +270,22 @@ def check_bandit_feedback_inputs(\nelse:\nif not (np.issubdtype(action.dtype, np.integer) and action.min() >= 0):\nraise ValueError(\"`action` elements must be non-negative integers\")\n+ if factual_reward is not None:\n+ check_array(array=factual_reward, name=\"expected_reward\", expected_dim=2)\n+ if not (\n+ context.shape[0]\n+ == action.shape[0]\n+ == reward.shape[0]\n+ == factual_reward.shape[0]\n+ ):\n+ raise ValueError(\n+ \"Expected `context.shape[0] == action.shape[0] == reward.shape[0] == factual_reward.shape[0]`\"\n+ \", but found it False\"\n+ )\n+ if not (np.all(np.choose(action, factual_reward.T) == reward)):\n+ raise ValueError(\n+ \"`factual_reward` should match the `reward` values for each taken action.\"\n+ )\nif pscore is not None:\ncheck_array(array=pscore, name=\"pscore\", expected_dim=1)\nif not (\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -149,6 +149,7 @@ def feedback_key_set() -> Set[str]:\n\"action_context\",\n\"context\",\n\"expected_reward\",\n+ \"factual_reward\",\n\"n_actions\",\n\"n_rounds\",\n\"position\",\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/simulator/test_simulator.py",
"diff": "+import re\n+\n+import pytest\n+from obp.policy.linear import LinTS\n+\n+from obp.policy.contextfree import EpsilonGreedy\n+from obp.dataset.synthetic import logistic_reward_function\n+from obp.dataset import SyntheticBanditDataset\n+from obp.policy.policy_type import PolicyType\n+from obp.simulator import run_bandit_simulation\n+\n+\n+def test_run_bandit_simulation_updates_at_each_taken_action():\n+ n_rounds = 100\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=5,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+\n+ epsilon_greedy = EpsilonGreedy(n_actions=3)\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=epsilon_greedy)\n+\n+ assert epsilon_greedy.n_trial == n_rounds\n+\n+\n+def test_run_bandit_simulation_handles_context_in_simulations():\n+ n_rounds = 100\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=5,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+\n+ lin_ts = LinTS(\n+ dim=dataset.dim_context, n_actions=dataset.n_actions, random_state=12345\n+ )\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=lin_ts)\n+\n+ assert lin_ts.n_trial == n_rounds\n+\n+\n+def test_run_bandit_simulation_raises_on_unknown_policy():\n+ n_rounds = 1\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+\n+ class OfflineEpsilon(EpsilonGreedy):\n+ @property\n+ def policy_type(self) -> PolicyType:\n+ return PolicyType.OFFLINE\n+\n+ epsilon_greedy = OfflineEpsilon(n_actions=3)\n+ with pytest.raises(\n+ RuntimeError,\n+ match=re.escape(\n+ r\"Policy type PolicyType.OFFLINE of policy egreedy_1.0 is \" r\"unsupported\"\n+ ),\n+ ):\n+ _ = run_bandit_simulation(\n+ bandit_feedback=bandit_feedback, policy=epsilon_greedy\n+ )\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | Add factual rewards to synthetic data generation and enable online bandit simulations |
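The new `factual_reward` field is a full (n_rounds, n_actions) matrix with one reward draw per action in every round, so the logged `reward` is simply the column picked by the logged `action`, and the simulator can pay an online policy for whatever action it chooses instead. A small standalone sketch of that relationship and of the consistency check added to `check_bandit_feedback_inputs` (arrays here are illustrative):

```python
import numpy as np

rng = np.random.default_rng(12345)
n_rounds, n_actions = 6, 3

# full-information rewards: one Bernoulli draw per (round, action) pair
factual_reward = rng.binomial(n=1, p=0.3, size=(n_rounds, n_actions))
action = rng.integers(0, n_actions, size=n_rounds)

# the logged reward is the factual reward of the logged action ...
reward = factual_reward[np.arange(n_rounds), action]

# ... which is exactly what the new validation asserts
assert np.all(np.choose(action, factual_reward.T) == reward)

# an online policy being simulated is instead rewarded for its own choice
simulated_action = rng.integers(0, n_actions, size=n_rounds)
simulated_reward = factual_reward[np.arange(n_rounds), simulated_action]
print(reward, simulated_reward)
```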
641,008 | 11.07.2022 16:24:25 | -7,200 | 9f39574e06488982eceaf00118ac25a9cddff690 | reward independent delays for the simulator | [
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "\"\"\"Bandit Simulator.\"\"\"\nfrom copy import deepcopy\n-from typing import Callable\n+from typing import Callable, Dict, List\nfrom typing import Union\nimport numpy as np\n+import pandas as pd\nfrom tqdm import tqdm\nfrom ..policy import BaseContextFreePolicy\n@@ -58,11 +59,20 @@ def run_bandit_simulation(\nbandit_feedback[\"position\"] = np.zeros_like(\nbandit_feedback[\"action\"], dtype=int\n)\n- for position_, context_, factual_reward in tqdm(\n+\n+ reward_round_lookup = None\n+ if bandit_feedback[\"round_delays\"] is not None:\n+ reward_round_lookup = create_reward_round_lookup(\n+ bandit_feedback[\"round_delays\"]\n+ )\n+\n+ for round_idx, (position_, context_, factual_reward) in tqdm(\n+ enumerate(\nzip(\nbandit_feedback[\"position\"],\nbandit_feedback[\"context\"],\nbandit_feedback[\"factual_reward\"],\n+ )\n),\ntotal=bandit_feedback[\"n_rounds\"],\n):\n@@ -77,11 +87,26 @@ def run_bandit_simulation(\nf\"Policy type {policy_.policy_type} of policy {policy_.policy_name} is unsupported\"\n)\n+ selected_actions_list.append(selected_actions)\naction_ = selected_actions[position_]\nreward_ = factual_reward[action_]\n+ if bandit_feedback[\"round_delays\"] is None:\nupdate_policy(policy_, context_, action_, reward_)\n- selected_actions_list.append(selected_actions)\n+ else:\n+ available_rounds = reward_round_lookup.get(round_idx, [])\n+ delayed_update_policy(\n+ available_rounds, bandit_feedback, selected_actions_list, policy_\n+ )\n+\n+ if available_rounds:\n+ del reward_round_lookup[round_idx]\n+\n+ if bandit_feedback[\"round_delays\"] is not None:\n+ for round_idx, available_rounds in reward_round_lookup.items():\n+ delayed_update_policy(\n+ available_rounds, bandit_feedback, selected_actions_list, policy_\n+ )\naction_dist = convert_to_action_dist(\nn_actions=policy.n_actions,\n@@ -90,6 +115,55 @@ def run_bandit_simulation(\nreturn action_dist\n+def delayed_update_policy(\n+ available_rounds: List[int],\n+ bandits_feedback: BanditFeedback,\n+ selected_actions_list: List[np.ndarray],\n+ policy_,\n+) -> None:\n+ for available_round_idx in available_rounds:\n+ position_ = bandits_feedback[\"position\"][available_round_idx]\n+ available_action = selected_actions_list[available_round_idx][position_]\n+ available_context = bandits_feedback[\"context\"][available_round_idx]\n+ available_factual_reward = bandits_feedback[\"factual_reward\"][\n+ available_round_idx\n+ ][available_action]\n+ update_policy(\n+ policy_, available_context, available_action, available_factual_reward\n+ )\n+\n+\n+def create_reward_round_lookup(round_delays: np.ndarray) -> Dict[int, List[int]]:\n+ \"\"\"Convert an array of round delays to a dict mapping the available rewards for each round.\n+\n+ Parameters\n+ ----------\n+ round_delays: np.ndarray\n+ A 1-dimensional numpy array containing the deltas representing how many rounds should be between the taken\n+ action and reward observation.\n+\n+ Returns\n+ --------\n+ reward_round_lookup: Dict\n+ A dict with the round at which feedback become available as a key and a list with the index of all actions\n+ for which the reward becomes available in that round.\n+\n+ \"\"\"\n+ rounds = np.arange(len(round_delays))\n+\n+ reward_round_pdf = pd.DataFrame(\n+ {\"available_at_round\": rounds + round_delays, \"exposed_at_round\": rounds}\n+ )\n+\n+ reward_round_lookup = (\n+ reward_round_pdf.groupby([\"available_at_round\"])[\"exposed_at_round\"]\n+ .apply(list)\n+ .to_dict()\n+ )\n+\n+ return reward_round_lookup\n+\n+\ndef update_policy(\npolicy: BanditPolicy, 
context: np.ndarray, action: int, reward: int\n) -> None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "+from functools import partial\n+\nimport numpy as np\nimport pytest\nfrom obp.dataset import SyntheticBanditDataset\n-from obp.dataset.synthetic import linear_behavior_policy\n+from obp.dataset.synthetic import linear_behavior_policy, ExponentialDelaySampler\nfrom obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import logistic_polynomial_reward_function\nfrom obp.dataset.synthetic import logistic_reward_function\n@@ -309,6 +311,119 @@ def test_synthetic_sample_reward_using_valid_inputs(context, action, description\nassert reward.shape == action.shape, \"Invalid response of sample_reward\"\n+def test_synthetic_sample_results_in_sampled_delay_when_delay_function_is_given():\n+ delay_function = ExponentialDelaySampler(\n+ scale=100.0, random_state=12345\n+ ).exponential_delay_function\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=delay_function,\n+ random_state=12345,\n+ )\n+\n+ actual_bandits_dataset = dataset.obtain_batch_bandit_feedback(n_rounds=5)\n+\n+ assert (\n+ actual_bandits_dataset[\"round_delays\"] == [266.0, 39.0, 21.0, 23.0, 84.0]\n+ ).all()\n+\n+\n+def test_synthetic_sample_results_with_exponential_delay_function_has_different_delays_each_batch():\n+ delay_function = ExponentialDelaySampler(\n+ scale=1000.0, random_state=12345\n+ ).exponential_delay_function\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=delay_function,\n+ random_state=12345,\n+ )\n+\n+ actual_delays_1 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\n+ actual_delays_2 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\n+ assert (actual_delays_1 == [2654.0, 381.0, 204.0, 229.0, 839.0]).all()\n+ assert (actual_delays_2 == [906.0, 3339.0, 1059.0, 1382.0, 1061.0]).all()\n+\n+\n+def test_synthetic_sample_results_with_exponential_delay_function_has_same_delays_each_dataset():\n+ delay_function = ExponentialDelaySampler(\n+ scale=1000.0, random_state=12345\n+ ).exponential_delay_function\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=delay_function,\n+ random_state=12345,\n+ )\n+\n+ actual_delays_1 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\n+\n+ delay_function = ExponentialDelaySampler(\n+ scale=1000.0, random_state=12345\n+ ).exponential_delay_function\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=delay_function,\n+ random_state=12345,\n+ )\n+ actual_delays_2 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\n+ assert (actual_delays_1 == [2654.0, 381.0, 204.0, 229.0, 839.0]).all()\n+ assert (actual_delays_2 == [2654.0, 381.0, 204.0, 229.0, 839.0]).all()\n+\n+\n+def test_synthetic_sample_results_do_not_contain_reward_delay_when_delay_function_is_none():\n+ dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ reward_function=logistic_sparse_reward_function,\n+ random_state=12345,\n+ )\n+\n+ actual_bandits_dataset = dataset.obtain_batch_bandit_feedback(n_rounds=5)\n+\n+ assert actual_bandits_dataset[\"round_delays\"] is None\n+\n+\n+def test_synthetic_sample_results_reward_delay_is_configurable_through_delay_function():\n+ def trivial_delay_func(*args, **kwargs):\n+ return np.asarray([1, 2, 3, 4, 5])\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=10,\n+ 
reward_function=logistic_sparse_reward_function,\n+ delay_function=trivial_delay_func,\n+ random_state=12345,\n+ )\n+\n+ actual_bandits_dataset = dataset.obtain_batch_bandit_feedback(n_rounds=5)\n+\n+ assert (actual_bandits_dataset[\"round_delays\"] == [1, 2, 3, 4, 5]).all()\n+\n+\[email protected](\n+ \"size, random_state, expected_delays\",\n+ [\n+ (5, 12345, [266.0, 39.0, 21.0, 23.0, 84.0]),\n+ (3, 12345, [266.0, 39.0, 21.0]),\n+ (5, 54321, [243.0, 98.0, 157.0, 57.0, 79.0]),\n+ ],\n+)\n+def test_exponential_delay_function_results_in_expected_seeded_discrete_delays(\n+ size, random_state, expected_delays\n+):\n+ delay_function = ExponentialDelaySampler(\n+ scale=100.0, random_state=random_state\n+ ).exponential_delay_function\n+\n+ actual_delays = delay_function(size)\n+ assert (actual_delays == expected_delays).all()\n+\n+\ndef test_synthetic_obtain_batch_bandit_feedback():\n# n_rounds\nwith pytest.raises(ValueError):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "import re\n+import numpy as np\nimport pytest\n-from obp.policy.linear import LinTS\n-from obp.policy.contextfree import EpsilonGreedy\n-from obp.dataset.synthetic import logistic_reward_function\n+from obp.policy import BaseContextFreePolicy\n+from obp.policy.linear import LinTS, LinUCB\n+\n+from obp.policy.contextfree import EpsilonGreedy, Random, BernoulliTS\n+from obp.dataset.synthetic import logistic_reward_function, sparse_reward_function\nfrom obp.dataset import SyntheticBanditDataset\nfrom obp.policy.policy_type import PolicyType\nfrom obp.simulator import run_bandit_simulation\n@@ -71,3 +74,133 @@ def test_run_bandit_simulation_raises_on_unknown_policy():\n_ = run_bandit_simulation(\nbandit_feedback=bandit_feedback, policy=epsilon_greedy\n)\n+\n+\n+class BanditUpdateTracker(BaseContextFreePolicy):\n+ \"\"\"\n+ This class can be used to keep track on updates being sent to the policy.\n+ The policy itself always provides a random choice and should not be used for any analytical purposes.\n+ \"\"\"\n+\n+ n_rounds = 0\n+\n+ def __post_init__(self) -> None:\n+ self.parameter_updates = []\n+ super().__post_init__()\n+\n+ def select_action(self) -> np.ndarray:\n+ self.n_rounds += 1\n+ return self.random_.choice(self.n_actions, size=self.len_list, replace=False)\n+\n+ def update_params(self, action: int, reward: float) -> None:\n+ self.parameter_updates.append(\n+ {\"round\": self.n_rounds, \"action\": action, \"reward\": reward}\n+ )\n+\n+\n+def test_run_bandit_simulation_applies_policy_in_delay_specified_order():\n+ n_rounds = 5\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=1,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ bandit_feedback[\"round_delays\"] = np.asarray([2, 1, 2, 1, 0])\n+\n+ tracker = BanditUpdateTracker(n_actions=3, random_state=12345)\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=tracker)\n+\n+ expected_updates = [\n+ {\"round\": 3, \"action\": [0], \"reward\": [1]},\n+ {\"round\": 3, \"action\": [0], \"reward\": [1]},\n+ {\"round\": 5, \"action\": [2], \"reward\": [1]},\n+ {\"round\": 5, \"action\": [1], \"reward\": [0]},\n+ {\"round\": 5, \"action\": [2], \"reward\": [0]},\n+ ]\n+\n+ assert tracker.parameter_updates == expected_updates\n+\n+\n+def test_run_bandit_simulation_applies_all_rewards_delayed_till_after_all_rounds_to_the_end_of_simulation():\n+ n_rounds = 5\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=1,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ bandit_feedback[\"round_delays\"] = np.asarray([2, 1, 2, 2, 2])\n+\n+ tracker = BanditUpdateTracker(n_actions=3, random_state=12345)\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=tracker)\n+\n+ expected_updates = [\n+ {\"round\": 3, \"action\": [0], \"reward\": [1]},\n+ {\"round\": 3, \"action\": [0], \"reward\": [1]},\n+ {\"round\": 5, \"action\": [2], \"reward\": [1]},\n+ {\"round\": 5, \"action\": [1], \"reward\": [0]},\n+ {\"round\": 5, \"action\": [2], \"reward\": [0]},\n+ ]\n+\n+ assert tracker.parameter_updates == expected_updates\n+\n+\[email protected](\n+ \"policy\",\n+ [\n+ Random(n_actions=3, epsilon=1.0, random_state=12345),\n+ EpsilonGreedy(n_actions=3, epsilon=0.1, random_state=12345),\n+ BernoulliTS(n_actions=3, 
random_state=12345),\n+ LinTS(dim=4, n_actions=3, random_state=12345),\n+ LinUCB(dim=4, n_actions=3, random_state=12345),\n+ ],\n+)\n+def test_run_bandit_simulation_does_not_crash_with_various_bandit_algorithms(policy):\n+ n_rounds = 5\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=4,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+ bandit_feedback[\"round_delays\"] = np.asarray([2, 1, 2, 2, 2])\n+\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=policy)\n+\n+\n+def test_run_bandit_simulation_applies_policy_directly_when_no_delay():\n+ n_rounds = 5\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=1,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n+\n+ bandit_feedback[\"round_delays\"] = None\n+\n+ tracker = BanditUpdateTracker(n_actions=3, random_state=12345)\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=tracker)\n+\n+ expected_updates = [\n+ {\"action\": 0, \"reward\": 1, \"round\": 1},\n+ {\"action\": 0, \"reward\": 1, \"round\": 2},\n+ {\"action\": 2, \"reward\": 1, \"round\": 3},\n+ {\"action\": 1, \"reward\": 0, \"round\": 4},\n+ {\"action\": 2, \"reward\": 0, \"round\": 5},\n+ ]\n+\n+ assert tracker.parameter_updates == expected_updates\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | reward independent delays for the simulator |
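The tests in this record show the intended wiring of the new delay hooks. As a rough, non-authoritative usage sketch (argument names as of this commit; the sampler's `scale` argument is renamed `max_scale` in a later record), a reward-independent delay experiment could look like:

```python
# Illustrative sketch only, mirroring the tests above: delays are sampled per
# round, attached to the synthetic feedback, and respected by the simulator.
from obp.dataset import SyntheticBanditDataset
from obp.dataset.synthetic import (
    ExponentialDelaySampler,
    logistic_sparse_reward_function,
)
from obp.policy.contextfree import EpsilonGreedy
from obp.simulator import run_bandit_simulation

# Delays are drawn from an exponential distribution and rounded up to whole rounds.
delay_function = ExponentialDelaySampler(
    scale=100.0, random_state=12345
).exponential_delay_function

dataset = SyntheticBanditDataset(
    n_actions=3,
    reward_function=logistic_sparse_reward_function,
    delay_function=delay_function,  # omit this to get round_delays=None (no delay)
    random_state=12345,
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=1000)
print(bandit_feedback["round_delays"][:5])  # per-round delays, in rounds

# The simulator only reveals each (action, reward) pair to the policy once its
# delay has elapsed, so parameter updates arrive in delay-specified order.
policy = EpsilonGreedy(n_actions=3, epsilon=0.1, random_state=12345)
_ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=policy)
```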
641,008 | 12.07.2022 11:37:51 | -7,200 | b5f9bd42cd5523b48e24ae553ba6fbe59afa9774 | reward-dependent delays for the simulators | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -358,7 +358,11 @@ class SyntheticBanditDataset(BaseBanditDataset):\nround_delays = None\nif self.delay_function:\n- round_delays = self.delay_function(actions.shape[0])\n+ round_delays = self.delay_function(\n+ n_rounds=actions.shape[0],\n+ n_actions=self.n_actions,\n+ expected_rewards=expected_reward_,\n+ )\nreturn dict(\nn_rounds=n_rounds,\n@@ -837,7 +841,24 @@ def _base_reward_function(\n@dataclass\nclass ExponentialDelaySampler:\n- scale: float = 100.0\n+ \"\"\"Class for sampling delays from different exponential functions.\n+\n+ Parameters\n+ -----------\n+ max_scale: float, default=100.0\n+ The maximum scale parameter for the exponential delay distribution. When there is no weighted exponential\n+ function the max_scale becomes the default scale.\n+\n+ min_scale: float, default=10.0\n+ The minimum scale parameter for the exponential delay distribution. Only used when sampling from a weighted\n+ exponential function.\n+\n+ random_state: int, default=12345\n+ Controls the random seed in sampling synthetic bandit data.\n+ \"\"\"\n+\n+ max_scale: float = 100.0\n+ min_scale: float = 10.0\nrandom_state: int = None\ndef __post_init__(self) -> None:\n@@ -845,7 +866,9 @@ class ExponentialDelaySampler:\nraise ValueError(\"`random_state` must be given\")\nself.random_ = check_random_state(self.random_state)\n- def exponential_delay_function(self, sample_rounds: int) -> np.ndarray:\n+ def exponential_delay_function(\n+ self, n_rounds: int, n_actions: int, **kwargs\n+ ) -> np.ndarray:\n\"\"\"Exponential delay function used for sampling a number of delay rounds before rewards can be observed.\nNote\n@@ -855,14 +878,12 @@ class ExponentialDelaySampler:\nParameters\n-----------\n- sample_rounds: int\n+ n_rounds: int\nNumber of rounds to sample delays for.\n- scale : float, default=100.0\n- The scale parameter, :math:`\\beta = 1/\\lambda`. Must be non-negative.\n-\n- random_state: int, default=None\n- Controls the random seed in sampling dataset.\n+ n_actions: int\n+ Number of actions to sample delays for. If the exponential function is not parameterised the delays are\n+ repeated for each actions.\nReturns\n---------\n@@ -875,7 +896,39 @@ class ExponentialDelaySampler:\n\"Addressing delayed feedback for continuous training with neural networks in CTR prediction.\" 2019.\n\"\"\"\n- return np.ceil(self.random_.exponential(scale=self.scale, size=sample_rounds))\n+ delays_per_round = np.ceil(\n+ self.random_.exponential(scale=self.max_scale, size=n_rounds)\n+ )\n+\n+ return np.tile(delays_per_round, (n_actions, 1)).T\n+\n+ def exponential_delay_function_expected_reward_weighted(\n+ self, expected_rewards: np.ndarray, **kwargs\n+ ) -> np.ndarray:\n+ \"\"\"Exponential delay function used for sampling a number of delay rounds before rewards can be observed.\n+ Each delay is conditioned on the expected reward by multiplying (1 - expected_reward) * scale. This creates\n+ the assumption that the more likely a reward is going be observed, the more likely it will be that the reward\n+ comes sooner. Eg. recommending an attractive item will likely result in a faster purchase.\n+\n+ Parameters\n+ -----------\n+ expected_rewards : array-like, shape (n_rounds, n_actions)\n+ The expected reward between 0 and 1 for each arm for each round. 
This used to weight the scale of the\n+ exponential function.\n+\n+ Returns\n+ ---------\n+ delay_rounds: array-like, shape (n_rounds, )\n+ Rounded up round delays representing the amount of rounds before the policy can observe the rewards.\n+ \"\"\"\n+ scale = self.min_scale + (\n+ (1 - expected_rewards) * (self.max_scale - self.min_scale)\n+ )\n+ delays_per_round = np.ceil(\n+ self.random_.exponential(scale=scale, size=expected_rewards.shape)\n+ )\n+\n+ return delays_per_round\ndef linear_behavior_policy(\n"
},
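The reward-weighted sampler added above interpolates the exponential scale between `min_scale` and `max_scale` with the expected reward, so actions that are more likely to be rewarded tend to receive shorter delays. A minimal standalone NumPy sketch of that scale computation (not using the obp class itself; the numbers are illustrative):

```python
import numpy as np

rng = np.random.RandomState(12345)
min_scale, max_scale = 10.0, 100.0

# Expected reward per (round, action); shape (n_rounds, n_actions).
expected_rewards = np.asarray([[0.9, 0.1],
                               [0.5, 0.5]])

# The scale shrinks toward min_scale as the expected reward approaches 1,
# mirroring exponential_delay_function_expected_reward_weighted above.
scale = min_scale + (1 - expected_rewards) * (max_scale - min_scale)
round_delays = np.ceil(rng.exponential(scale=scale, size=expected_rewards.shape))
print(round_delays)  # one delay (in rounds) per round/action pair
```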
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "# Licensed under the Apache 2.0 License.\n\"\"\"Bandit Simulator.\"\"\"\n+from collections import defaultdict\nfrom copy import deepcopy\nfrom typing import Callable, Dict, List\nfrom typing import Union\n@@ -60,11 +61,11 @@ def run_bandit_simulation(\nbandit_feedback[\"action\"], dtype=int\n)\n- reward_round_lookup = None\n- if bandit_feedback[\"round_delays\"] is not None:\n- reward_round_lookup = create_reward_round_lookup(\n- bandit_feedback[\"round_delays\"]\n- )\n+ reward_round_lookup = defaultdict(list)\n+ # if bandit_feedback[\"round_delays\"] is not None:\n+ # reward_round_lookup = create_reward_round_lookup(\n+ # bandit_feedback[\"round_delays\"]\n+ # )\nfor round_idx, (position_, context_, factual_reward) in tqdm(\nenumerate(\n@@ -94,6 +95,11 @@ def run_bandit_simulation(\nif bandit_feedback[\"round_delays\"] is None:\nupdate_policy(policy_, context_, action_, reward_)\nelse:\n+ # Add the current round to the lookup\n+ round_delay = bandit_feedback[\"round_delays\"][round_idx, action_]\n+ reward_round_lookup[round_delay + round_idx].append(round_idx)\n+\n+ # Update policy with all available rounds\navailable_rounds = reward_round_lookup.get(round_idx, [])\ndelayed_update_policy(\navailable_rounds, bandit_feedback, selected_actions_list, policy_\n@@ -102,7 +108,6 @@ def run_bandit_simulation(\nif available_rounds:\ndel reward_round_lookup[round_idx]\n- if bandit_feedback[\"round_delays\"] is not None:\nfor round_idx, available_rounds in reward_round_lookup.items():\ndelayed_update_policy(\navailable_rounds, bandit_feedback, selected_actions_list, policy_\n"
},
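The simulator change above schedules each observed reward under the round at which it becomes visible (current round plus the chosen action's delay) and flushes whatever is due at every step, applying any leftovers after the final round. A small standalone sketch of that bookkeeping, with hypothetical stand-in data instead of a real policy update:

```python
from collections import defaultdict

round_delays = [2, 1, 2, 2, 2]  # delay of the chosen action in each round

reward_round_lookup = defaultdict(list)  # reveal round -> pending round indices
updates = []                             # (round applied, originating round)

for round_idx, delay in enumerate(round_delays):
    # Schedule this round's feedback for when its delay has elapsed.
    reward_round_lookup[round_idx + delay].append(round_idx)
    # Apply every reward that has become observable at this round.
    for pending in reward_round_lookup.pop(round_idx, []):
        updates.append((round_idx, pending))

# Feedback still pending after the last round is applied at the end of the run.
last_round = len(round_delays) - 1
for reveal_round in sorted(reward_round_lookup):
    for pending in reward_round_lookup[reveal_round]:
        updates.append((last_round, pending))

print(updates)  # e.g. [(2, 0), (2, 1), (4, 2), (4, 3), (4, 4)]
```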
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -312,12 +312,13 @@ def test_synthetic_sample_reward_using_valid_inputs(context, action, description\ndef test_synthetic_sample_results_in_sampled_delay_when_delay_function_is_given():\n+ n_actions = 3\ndelay_function = ExponentialDelaySampler(\n- scale=100.0, random_state=12345\n+ max_scale=100.0, random_state=12345\n).exponential_delay_function\ndataset = SyntheticBanditDataset(\n- n_actions=10,\n+ n_actions=n_actions,\nreward_function=logistic_sparse_reward_function,\ndelay_function=delay_function,\nrandom_state=12345,\n@@ -325,18 +326,46 @@ def test_synthetic_sample_results_in_sampled_delay_when_delay_function_is_given(\nactual_bandits_dataset = dataset.obtain_batch_bandit_feedback(n_rounds=5)\n- assert (\n- actual_bandits_dataset[\"round_delays\"] == [266.0, 39.0, 21.0, 23.0, 84.0]\n- ).all()\n+ expected_round_delays = np.tile([266.0, 39.0, 21.0, 23.0, 84.0], (n_actions, 1)).T\n+ assert (actual_bandits_dataset[\"round_delays\"] == expected_round_delays).all()\n+\n+\n+def test_synthetic_sample_results_in_sampled_delay_with_weighted_delays_per_arm():\n+ n_actions = 3\n+ delay_function = ExponentialDelaySampler(\n+ max_scale=100.0, min_scale=10.0, random_state=12345\n+ ).exponential_delay_function_expected_reward_weighted\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=n_actions,\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=delay_function,\n+ random_state=12345,\n+ )\n+\n+ actual_bandits_dataset = dataset.obtain_batch_bandit_feedback(n_rounds=5)\n+\n+ expected_round_delays = np.asarray(\n+ [\n+ [35.0, 38.0, 4.0],\n+ [3.0, 84.0, 17.0],\n+ [44.0, 106.0, 26.0],\n+ [14.0, 138.0, 61.0],\n+ [1.0, 12.0, 7.0],\n+ ]\n+ )\n+ assert (actual_bandits_dataset[\"round_delays\"] == expected_round_delays).all()\ndef test_synthetic_sample_results_with_exponential_delay_function_has_different_delays_each_batch():\n+ n_actions = 3\n+\ndelay_function = ExponentialDelaySampler(\n- scale=1000.0, random_state=12345\n+ max_scale=1000.0, random_state=12345\n).exponential_delay_function\ndataset = SyntheticBanditDataset(\n- n_actions=10,\n+ n_actions=n_actions,\nreward_function=logistic_sparse_reward_function,\ndelay_function=delay_function,\nrandom_state=12345,\n@@ -344,17 +373,27 @@ def test_synthetic_sample_results_with_exponential_delay_function_has_different_\nactual_delays_1 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\nactual_delays_2 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\n- assert (actual_delays_1 == [2654.0, 381.0, 204.0, 229.0, 839.0]).all()\n- assert (actual_delays_2 == [906.0, 3339.0, 1059.0, 1382.0, 1061.0]).all()\n+\n+ expected_round_delays_1 = np.tile(\n+ [2654.0, 381.0, 204.0, 229.0, 839.0], (n_actions, 1)\n+ ).T\n+ expected_round_delays_2 = np.tile(\n+ [906.0, 3339.0, 1059.0, 1382.0, 1061.0], (n_actions, 1)\n+ ).T\n+\n+ assert (actual_delays_1 == expected_round_delays_1).all()\n+ assert (actual_delays_2 == expected_round_delays_2).all()\ndef test_synthetic_sample_results_with_exponential_delay_function_has_same_delays_each_dataset():\n+ n_actions = 3\n+\ndelay_function = ExponentialDelaySampler(\n- scale=1000.0, random_state=12345\n+ max_scale=1000.0, random_state=12345\n).exponential_delay_function\ndataset = SyntheticBanditDataset(\n- n_actions=10,\n+ n_actions=n_actions,\nreward_function=logistic_sparse_reward_function,\ndelay_function=delay_function,\nrandom_state=12345,\n@@ -363,23 +402,31 @@ def test_synthetic_sample_results_with_exponential_delay_function_has_same_delay\nactual_delays_1 = 
dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\ndelay_function = ExponentialDelaySampler(\n- scale=1000.0, random_state=12345\n+ max_scale=1000.0, random_state=12345\n).exponential_delay_function\ndataset = SyntheticBanditDataset(\n- n_actions=10,\n+ n_actions=n_actions,\nreward_function=logistic_sparse_reward_function,\ndelay_function=delay_function,\nrandom_state=12345,\n)\nactual_delays_2 = dataset.obtain_batch_bandit_feedback(n_rounds=5)[\"round_delays\"]\n- assert (actual_delays_1 == [2654.0, 381.0, 204.0, 229.0, 839.0]).all()\n- assert (actual_delays_2 == [2654.0, 381.0, 204.0, 229.0, 839.0]).all()\n+\n+ expected_round_delays_1 = np.tile(\n+ [2654.0, 381.0, 204.0, 229.0, 839.0], (n_actions, 1)\n+ ).T\n+ expected_round_delays_2 = np.tile(\n+ [2654.0, 381.0, 204.0, 229.0, 839.0], (n_actions, 1)\n+ ).T\n+\n+ assert (actual_delays_1 == expected_round_delays_1).all()\n+ assert (actual_delays_2 == expected_round_delays_2).all()\ndef test_synthetic_sample_results_do_not_contain_reward_delay_when_delay_function_is_none():\ndataset = SyntheticBanditDataset(\n- n_actions=10,\n+ n_actions=3,\nreward_function=logistic_sparse_reward_function,\nrandom_state=12345,\n)\n@@ -390,11 +437,13 @@ def test_synthetic_sample_results_do_not_contain_reward_delay_when_delay_functio\ndef test_synthetic_sample_results_reward_delay_is_configurable_through_delay_function():\n+ n_actions = 3\n+\ndef trivial_delay_func(*args, **kwargs):\nreturn np.asarray([1, 2, 3, 4, 5])\ndataset = SyntheticBanditDataset(\n- n_actions=10,\n+ n_actions=3,\nreward_function=logistic_sparse_reward_function,\ndelay_function=trivial_delay_func,\nrandom_state=12345,\n@@ -406,21 +455,21 @@ def test_synthetic_sample_results_reward_delay_is_configurable_through_delay_fun\[email protected](\n- \"size, random_state, expected_delays\",\n+ \"size, actions, random_state, expected_delays\",\n[\n- (5, 12345, [266.0, 39.0, 21.0, 23.0, 84.0]),\n- (3, 12345, [266.0, 39.0, 21.0]),\n- (5, 54321, [243.0, 98.0, 157.0, 57.0, 79.0]),\n+ (5, 3, 12345, np.tile([266.0, 39.0, 21.0, 23.0, 84.0], (3, 1)).T),\n+ (3, 3, 12345, np.tile([266.0, 39.0, 21.0], (3, 1)).T),\n+ (5, 3, 54321, np.tile([243.0, 98.0, 157.0, 57.0, 79.0], (3, 1)).T),\n],\n)\ndef test_exponential_delay_function_results_in_expected_seeded_discrete_delays(\n- size, random_state, expected_delays\n+ size, actions, random_state, expected_delays\n):\ndelay_function = ExponentialDelaySampler(\n- scale=100.0, random_state=random_state\n+ max_scale=100.0, random_state=random_state\n).exponential_delay_function\n- actual_delays = delay_function(size)\n+ actual_delays = delay_function(size, n_actions=actions)\nassert (actual_delays == expected_delays).all()\n@@ -481,6 +530,36 @@ def test_synthetic_obtain_batch_bandit_feedback():\n)\[email protected](\n+ \"size, actions, expected_reward, random_state, expected_delays\",\n+ [\n+ (\n+ 2,\n+ 2,\n+ np.asarray([[1, 0.01], [0.5, 0.5]]),\n+ 12344,\n+ np.asarray([[2.0, 55.0], [3.0, 27.0]]),\n+ ),\n+ (\n+ 2,\n+ 2,\n+ np.asarray([[0.1, 0.2], [0.3, 0.4]]),\n+ 12345,\n+ np.asarray([[242.0, 32.0], [15.0, 15.0]]),\n+ ),\n+ ],\n+)\n+def test_exponential_delay_function_conditioned_on_expected_reward_results_in_expected_seeded_discrete_delays(\n+ size, actions, expected_reward, random_state, expected_delays\n+):\n+ delay_function = ExponentialDelaySampler(\n+ max_scale=100.0, min_scale=10.0, random_state=random_state\n+ ).exponential_delay_function_expected_reward_weighted\n+\n+ actual_delays = delay_function(expected_rewards=expected_reward)\n+ assert 
(actual_delays == expected_delays).all()\n+\n+\n# expected_reward, action_dist, description\ninvalid_input_of_calc_policy_value = [\n(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "@@ -7,7 +7,7 @@ from obp.policy import BaseContextFreePolicy\nfrom obp.policy.linear import LinTS, LinUCB\nfrom obp.policy.contextfree import EpsilonGreedy, Random, BernoulliTS\n-from obp.dataset.synthetic import logistic_reward_function, sparse_reward_function\n+from obp.dataset.synthetic import logistic_reward_function, ExponentialDelaySampler\nfrom obp.dataset import SyntheticBanditDataset\nfrom obp.policy.policy_type import PolicyType\nfrom obp.simulator import run_bandit_simulation\n@@ -98,6 +98,25 @@ class BanditUpdateTracker(BaseContextFreePolicy):\n)\n+def test_run_bandit_simulation_works_end_to_end_with_synthetic_bandit_dataset():\n+ delay_function = ExponentialDelaySampler(\n+ scale=1.0, random_state=12345\n+ ).exponential_delay_function\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=1,\n+ reward_type=\"binary\",\n+ reward_function=logistic_reward_function,\n+ delay_function=delay_function,\n+ random_state=12345,\n+ )\n+ bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=5)\n+\n+ policy = EpsilonGreedy(n_actions=3, epsilon=0.1, random_state=12345)\n+ _ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=policy)\n+\n+\ndef test_run_bandit_simulation_applies_policy_in_delay_specified_order():\nn_rounds = 5\n@@ -109,7 +128,7 @@ def test_run_bandit_simulation_applies_policy_in_delay_specified_order():\nrandom_state=12345,\n)\nbandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- bandit_feedback[\"round_delays\"] = np.asarray([2, 1, 2, 1, 0])\n+ bandit_feedback[\"round_delays\"] = np.tile([2, 1, 2, 1, 0], (3, 1)).T\ntracker = BanditUpdateTracker(n_actions=3, random_state=12345)\n_ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=tracker)\n@@ -136,7 +155,7 @@ def test_run_bandit_simulation_applies_all_rewards_delayed_till_after_all_rounds\nrandom_state=12345,\n)\nbandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- bandit_feedback[\"round_delays\"] = np.asarray([2, 1, 2, 2, 2])\n+ bandit_feedback[\"round_delays\"] = np.tile([2, 1, 2, 2, 2], (3, 1)).T\ntracker = BanditUpdateTracker(n_actions=3, random_state=12345)\n_ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=tracker)\n@@ -173,7 +192,7 @@ def test_run_bandit_simulation_does_not_crash_with_various_bandit_algorithms(pol\nrandom_state=12345,\n)\nbandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)\n- bandit_feedback[\"round_delays\"] = np.asarray([2, 1, 2, 2, 2])\n+ bandit_feedback[\"round_delays\"] = np.tile([2, 1, 2, 2, 2], (3, 1)).T\n_ = run_bandit_simulation(bandit_feedback=bandit_feedback, policy=policy)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | reward depend delays for the simulators |
641,008 | 12.07.2022 16:02:02 | -7,200 | 9c53a193fc349828c0f97cf5830ca150571af841 | pseudocode and coefficient adjustments for drift implementation | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "\"\"\"Class for Generating Synthetic Logged Bandit Data.\"\"\"\nfrom dataclasses import dataclass\n-from typing import Callable\n+from typing import Callable, Tuple\nfrom typing import Optional\nimport numpy as np\n@@ -20,6 +20,8 @@ from ..utils import softmax\nfrom .base import BaseBanditDataset\nfrom .reward_type import RewardType\n+coef_func_signature = Callable[[np.ndarray, np.ndarray, np.random.RandomState], Tuple[np.ndarray, np.ndarray, np.ndarray]]\n+\n@dataclass\nclass SyntheticBanditDataset(BaseBanditDataset):\n@@ -449,9 +451,54 @@ class SyntheticBanditDataset(BaseBanditDataset):\nreturn np.sum(factual_reward * sampled_actions)\n+def sample_random_uniform_coefficients(\n+ effective_dim_action_context: int,\n+ effective_dim_context: int,\n+ random_: np.random.RandomState\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+ context_coef_ = random_.uniform(-1, 1, size=effective_dim_context)\n+ action_coef_ = random_.uniform(-1, 1, size=effective_dim_action_context)\n+ context_action_coef_ = random_.uniform(\n+ -1, 1, size=(effective_dim_context, effective_dim_action_context)\n+ )\n+ return context_coef_, action_coef_, context_action_coef_\n+\n+\n+@dataclass()\n+class CoefficientDrifter():\n+ drift_interval: int\n+ played_rounds: int = 0\n+ random_state: int = 12345\n+\n+ def __post_init__(self) -> None:\n+ if self.random_state is None:\n+ raise ValueError(\"`random_state` must be given\")\n+ self.random_ = check_random_state(self.random_state)\n+\n+ def get_coefficients(\n+ self,\n+ n_rounds: int,\n+ effective_dim_action_context: int,\n+ effective_dim_context: int,\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+ # Available_rounds = current_round % drift_interval\n+ # While required_rounds > 0:\n+ # Calculate available rounds in current coef\n+ # tile current coef for available rounds\n+ # if available rounds < required_rounds\n+ # sample new coef\n+ # required_rounds = required_rounds - available_rounds\n+ # Stack all coef\n+ #\n+\n+ return sample_random_uniform_coefficients(effective_dim_action_context, effective_dim_context, self.random_)\n+\n+\n+\ndef logistic_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards.\n@@ -464,6 +511,10 @@ def logistic_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n+ coef_func: Callable, default=sample_random_uniform_coefficients\n+ Function for generating the coefficients used for the context, action and context/action interactions.\n+ By default, the coefficients are randomly uniformly drawn.\n+\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -479,6 +530,7 @@ def logistic_reward_function(\naction_context=action_context,\ndegree=1,\nrandom_state=random_state,\n+ coef_func=coef_func,\n)\nreturn sigmoid(logits)\n@@ -487,6 +539,7 @@ def logistic_reward_function(\ndef logistic_polynomial_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with polynomial feature transformations.\n@@ -518,6 +571,7 @@ def logistic_polynomial_reward_function(\ncontext=context,\naction_context=action_context,\ndegree=3,\n+ 
coef_func=coef_func,\nrandom_state=random_state,\n)\n@@ -527,6 +581,7 @@ def logistic_polynomial_reward_function(\ndef logistic_sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with small effective feature dimension.\n@@ -560,6 +615,7 @@ def logistic_sparse_reward_function(\naction_context=action_context,\ndegree=4,\neffective_dim_ratio=0.3,\n+ coef_func=coef_func,\nrandom_state=random_state,\n)\n@@ -569,6 +625,7 @@ def logistic_sparse_reward_function(\ndef logistic_sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with small effective feature dimension.\n@@ -602,6 +659,7 @@ def logistic_sparse_reward_function(\naction_context=action_context,\ndegree=4,\neffective_dim_ratio=0.3,\n+ coef_func=coef_func,\nrandom_state=random_state,\n)\n@@ -611,6 +669,7 @@ def logistic_sparse_reward_function(\ndef linear_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Linear mean reward function for continuous rewards.\n@@ -637,6 +696,7 @@ def linear_reward_function(\ncontext=context,\naction_context=action_context,\ndegree=1,\n+ coef_func=coef_func,\nrandom_state=random_state,\n)\n@@ -644,6 +704,7 @@ def linear_reward_function(\ndef polynomial_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Polynomial mean reward function for continuous rewards.\n@@ -675,6 +736,7 @@ def polynomial_reward_function(\ncontext=context,\naction_context=action_context,\ndegree=3,\n+ coef_func=coef_func,\nrandom_state=random_state,\n)\n@@ -682,6 +744,7 @@ def polynomial_reward_function(\ndef sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Sparse mean reward function for continuous rewards.\n@@ -715,6 +778,7 @@ def sparse_reward_function(\naction_context=action_context,\ndegree=4,\neffective_dim_ratio=0.3,\n+ coef_func=coef_func,\nrandom_state=random_state,\n)\n@@ -724,6 +788,7 @@ def _base_reward_function(\naction_context: np.ndarray,\ndegree: int = 3,\neffective_dim_ratio: float = 1.0,\n+ coef_func: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Base function to define mean reward functions.\n@@ -820,13 +885,12 @@ def _base_reward_function(\neffective_context_ = context_\neffective_action_context_ = action_context_\n- context_coef_ = random_.uniform(-1, 1, size=effective_dim_context)\n- action_coef_ = random_.uniform(-1, 1, size=effective_dim_action_context)\n- context_action_coef_ = random_.uniform(\n- -1, 1, size=(effective_dim_context, effective_dim_action_context)\n- )\n+ context_coef_, action_coef_, context_action_coef_ = coef_func(effective_dim_action_context, effective_dim_context,\n+ random_)\n- context_values = np.tile(effective_context_ @ context_coef_, (n_actions, 1)).T\n+ 
context_coef_ = np.tile(context_coef_, (datasize, 1))\n+ context_values = np.tile(np.sum(effective_context_ * context_coef_, axis=1), (n_actions, 1)).T\n+ # context_values = np.tile(effective_context_ @ context_coef_, (n_actions, 1)).T\naction_values = np.tile(action_coef_ @ effective_action_context_.T, (datasize, 1))\ncontext_action_values = (\neffective_context_ @ context_action_coef_ @ effective_action_context_.T\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "-from functools import partial\n-\nimport numpy as np\nimport pytest\nfrom obp.dataset import SyntheticBanditDataset\n-from obp.dataset.synthetic import linear_behavior_policy, ExponentialDelaySampler\n+from obp.dataset.synthetic import linear_behavior_policy, ExponentialDelaySampler, _base_reward_function, \\\n+ CoefficientDrifter\nfrom obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import logistic_polynomial_reward_function\nfrom obp.dataset.synthetic import logistic_reward_function\n@@ -751,3 +750,119 @@ def test_synthetic_behavior_policy_function():\n)\nassert action_prob.shape[0] == n_rounds and action_prob.shape[1] == n_actions\nassert np.all(0 <= action_prob) and np.all(action_prob <= 1)\n+\n+\n+def test_base_reward_create_a_matrix_with_expected_rewards_with_identical_expectation_for_identical_rounds_():\n+ context = np.asarray([\n+ [1,2],\n+ [3,2],\n+ [3,2],\n+ ])\n+ action_context = np.asarray([\n+ [1,0,0],\n+ [0,0,1],\n+ [0,1,0],\n+ ])\n+ actual_expected_rewards = _base_reward_function(\n+ context, action_context, degree=5, effective_dim_ratio=1.0, random_state=12345)\n+\n+ expected_expected_rewards = np.asarray([\n+ [-4.50475921, -5.60364479, -4.6827207 ],\n+ [ 3.57444414, 7.18370309, -3.36258488],\n+ [ 3.57444414, 7.18370309, -3.36258488]])\n+\n+ assert np.allclose(actual_expected_rewards, expected_expected_rewards)\n+\n+\n+def test_coefficient_tracker_can_shift_expected_rewards_instantly_based_on_configured_intervals():\n+ context = np.asarray([\n+ [1,2],\n+ [3,2],\n+ [3,2],\n+ [3,2],\n+ ])\n+ action_context = np.asarray([\n+ [1,0,0],\n+ [0,0,1],\n+ [0,1,0],\n+ ])\n+ actual_expected_rewards = _base_reward_function(\n+ context,\n+ action_context,\n+ degree=5,\n+ effective_dim_ratio=1.0,\n+ # coef_func=TODOCLASS,\n+ random_state=12345\n+ )\n+\n+ expected_expected_rewards = np.asarray([\n+ [-4.50475921, -5.60364479, -4.6827207],\n+ [ 3.57444414, 7.18370309, -3.36258488],\n+ [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n+ [ 3.57444414, 7.18370309, -3.36258488]])\n+\n+ assert np.allclose(actual_expected_rewards, expected_expected_rewards)\n+\n+\n+def test_coefficient_tracker_can_shift_coefficient_instantly_based_on_configured_interval():\n+ drifter = CoefficientDrifter(drift_interval=3)\n+\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=4, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+\n+ expected_context_coef = np.asarray([\n+ [-4.50475921, -5.60364479, -4.6827207 ],\n+ [ 3.57444414, 7.18370309, -3.36258488],\n+ [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n+ [ 3.57444414, 7.18370309, -3.36258488]]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+\n+def test_coefficient_tracker_can_shift_coefficient_multiple_times_instantly_based_on_configured_interval():\n+ drifter = CoefficientDrifter(drift_interval=2)\n+\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=4, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+\n+ expected_context_coef = np.asarray([\n+ [-4.50475921, -5.60364479, -4.6827207 ],\n+ [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n+ [ 3.57444414, 7.18370309, -3.36258488],\n+ [ 3.57444414, 7.18370309, 
-3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE AGAIN\n+ [ 3.57444414, 7.18370309, -3.36258488],\n+ ]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+\n+def test_coefficient_tracker_keeps_track_of_shifted_coefficient_based_on_configured_interval_between_batches():\n+ drifter = CoefficientDrifter(drift_interval=2)\n+\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=3, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+\n+ expected_context_coef = np.asarray([\n+ [-4.50475921, -5.60364479, -4.6827207 ],\n+ [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n+ [ 3.57444414, 7.18370309, -3.36258488],]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=3, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+\n+ expected_context_coef_2 = np.asarray([\n+ [-4.50475921, -5.60364479, -4.6827207 ], # THIS ROUND SHOULD BE THE SAME AS THE LAST ONE FROM THE PREVIOUS\n+ [ 3.57444414, 7.18370309, -3.36258488],\n+ [ 3.57444414, 7.18370309, -3.36258488], # HERE THE COEF SHOULD CHANGE AGAIN\n+ ]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef_2)\n+\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | psuedocode and coefficient adjustments for drift implemetnation |
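This record threads a `coef_func` hook through the reward functions so that coefficient generation can be swapped out (the drifter itself is still pseudocode at this point). A hedged sketch of supplying a custom provider with the same positional signature as `sample_random_uniform_coefficients`; note that the next record renames the argument to `coef_function` and switches to keyword arguments that include `n_rounds`:

```python
import numpy as np

from obp.dataset.synthetic import logistic_reward_function


def constant_coefficients(effective_dim_action_context, effective_dim_context, random_):
    """Toy provider: fixed coefficients instead of random uniform draws."""
    context_coef_ = np.ones(effective_dim_context)
    action_coef_ = np.ones(effective_dim_action_context)
    context_action_coef_ = np.ones(
        (effective_dim_context, effective_dim_action_context)
    )
    return context_coef_, action_coef_, context_action_coef_


context = np.random.RandomState(12345).normal(size=(5, 3))
action_context = np.eye(4)  # one-hot action representation

expected_reward = logistic_reward_function(
    context, action_context, coef_func=constant_coefficients, random_state=12345
)
print(expected_reward.shape)  # (5, 4): one expected reward per round/action
```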
641,008 | 14.07.2022 18:03:26 | -7,200 | 3f2eb265ab08198e3412002521722f282c7a6758 | Added transition period, transition style, seasonality and base coef to drift | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "# Licensed under the Apache 2.0 License.\n\"\"\"Class for Generating Synthetic Logged Bandit Data.\"\"\"\n+from collections import deque\nfrom dataclasses import dataclass\nfrom typing import Callable, Tuple\n-from typing import Optional\n+from typing import Optional, List\nimport numpy as np\nfrom scipy.stats import truncnorm\n@@ -20,7 +21,189 @@ from ..utils import softmax\nfrom .base import BaseBanditDataset\nfrom .reward_type import RewardType\n-coef_func_signature = Callable[[np.ndarray, np.ndarray, np.random.RandomState], Tuple[np.ndarray, np.ndarray, np.ndarray]]\n+coef_func_signature = Callable[\n+ [np.ndarray, np.ndarray, np.random.RandomState],\n+ Tuple[np.ndarray, np.ndarray, np.ndarray],\n+]\n+\n+\n+def sample_random_uniform_coefficients(\n+ effective_dim_action_context: int,\n+ effective_dim_context: int,\n+ random_: np.random.RandomState,\n+ **kwargs,\n+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+ context_coef_ = random_.uniform(-1, 1, size=effective_dim_context)\n+ action_coef_ = random_.uniform(-1, 1, size=effective_dim_action_context)\n+ context_action_coef_ = random_.uniform(\n+ -1, 1, size=(effective_dim_context, effective_dim_action_context)\n+ )\n+ return context_coef_, action_coef_, context_action_coef_\n+\n+\n+@dataclass\n+class CoefficientDrifter:\n+ \"\"\"Class for synthesizing bandit data.\n+\n+ Note\n+ -----\n+ By calling the `obtain_batch_bandit_feedback` method several times,\n+ we can resample logged bandit data from the same data generating distribution.\n+ This can be used to estimate confidence intervals of the performances of OPE estimators.\n+\n+ If None is given as `behavior_policy_function`, the behavior policy will be generated from the true expected reward function. See the description of the `beta` argument, which controls the behavior policy.\n+\n+ Parameters\n+ -----------\n+\n+ References\n+ ------------\n+ Emanuele Cavenaghi, Gabriele Sottocornola, Fabio Stella, and Markus Zanker.\n+ \"Non stationary multi-armed bandit: Empirical evaluation of a new concept drift-aware algorithm.\", 2021.\n+\n+ \"\"\"\n+\n+ drift_interval: int\n+ transition_period: int = 0\n+ transition_type: str = \"linear\" # linear or weighted_sampled\n+ seasonal: bool = False\n+ base_coefficient_weight: float = 0.0\n+ effective_dim_action_context: Optional[int] = None\n+ effective_dim_context: Optional[int] = None\n+ played_rounds: int = 0\n+ random_state: int = 12345\n+\n+ context_coefs: Optional[deque] = None\n+ current_action_coef: Optional[np.ndarray] = None\n+ current_action_context_coef: Optional[np.ndarray] = None\n+ base_coef: Optional[np.ndarray] = None\n+\n+ def __post_init__(self) -> None:\n+ if self.random_state is None:\n+ raise ValueError(\"`random_state` must be given\")\n+ self.random_ = check_random_state(self.random_state)\n+ self.available_rounds = self.drift_interval\n+ self.context_coefs = deque(maxlen=2)\n+ if self.effective_dim_action_context and self.effective_dim_context:\n+ self.update_coef()\n+\n+ def update_coef(self) -> None:\n+ if self.base_coef is None:\n+ self.base_coef, _, _ = sample_random_uniform_coefficients(\n+ self.effective_dim_action_context,\n+ self.effective_dim_context,\n+ self.random_,\n+ )\n+\n+ if len(self.context_coefs) == 0:\n+ (\n+ tmp_context_coef,\n+ tmp_action_coef,\n+ tmp_action_context_coef,\n+ ) = sample_random_uniform_coefficients(\n+ self.effective_dim_action_context,\n+ self.effective_dim_context,\n+ self.random_,\n+ )\n+ self.context_coefs.append(tmp_context_coef)\n+\n+ # We only drift on the 
context_coef for now.\n+ if self.current_action_coef is None:\n+ self.current_action_coef = tmp_action_coef\n+ if self.current_action_context_coef is None:\n+ self.current_action_context_coef = tmp_action_context_coef\n+\n+ if self.seasonal and len(self.context_coefs) == 2:\n+ self.context_coefs.rotate()\n+ else:\n+ (\n+ tmp_context_coef,\n+ tmp_action_coef,\n+ tmp_action_context_coef,\n+ ) = sample_random_uniform_coefficients(\n+ self.effective_dim_action_context,\n+ self.effective_dim_context,\n+ self.random_,\n+ )\n+ self.context_coefs.append(tmp_context_coef)\n+\n+ def get_coefficients(\n+ self,\n+ n_rounds: int,\n+ effective_dim_context: int = None,\n+ effective_dim_action_context: int = None,\n+ **kwargs,\n+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n+ if effective_dim_action_context and effective_dim_context:\n+ eff_dim_not_set = (\n+ not self.effective_dim_action_context and not self.effective_dim_context\n+ )\n+ eff_dim_equal = (\n+ self.effective_dim_action_context == effective_dim_action_context\n+ and self.effective_dim_context == effective_dim_context\n+ )\n+ if eff_dim_not_set or eff_dim_equal:\n+ self.effective_dim_action_context = effective_dim_action_context\n+ self.effective_dim_context = effective_dim_context\n+ else:\n+ raise RuntimeError(\"Trying to change the effective dimensions\")\n+\n+ if len(self.context_coefs) == 0:\n+ self.update_coef()\n+\n+ required_rounds = n_rounds\n+ context_coefs = []\n+\n+ while required_rounds > 0:\n+ if required_rounds >= self.available_rounds:\n+ self.append_current_coefs(context_coefs, rounds=self.available_rounds)\n+ required_rounds -= self.available_rounds\n+ self.update_coef()\n+ self.available_rounds = self.drift_interval\n+ else:\n+ self.append_current_coefs(context_coefs, rounds=required_rounds)\n+ self.available_rounds -= required_rounds\n+ required_rounds = 0\n+\n+ # For now, we only drift on the context coefs, rest is static\n+ return (\n+ np.vstack(context_coefs),\n+ self.current_action_coef,\n+ self.current_action_context_coef,\n+ )\n+\n+ def append_current_coefs(\n+ self, context_coefs: List[np.ndarray], rounds: int\n+ ) -> None:\n+ shift_start = self.available_rounds - self.transition_period\n+\n+ transition_steps = np.arange(start=1, stop=self.transition_period + 1)\n+ if shift_start >= 0:\n+ transition_steps = np.pad(transition_steps, pad_width=[(shift_start, 0)])\n+ if shift_start < 0:\n+ transition_steps = transition_steps[-shift_start:]\n+\n+ shift_remainder = self.available_rounds - rounds\n+ if shift_remainder > 0:\n+ transition_steps = transition_steps[shift_remainder:]\n+\n+ weights = transition_steps / (self.transition_period + 1)\n+\n+ if self.transition_type is \"weighted_sampled\":\n+ weights = self.random_.binomial(n=1, p=weights)\n+\n+ A = np.tile(self.context_coefs[0], (rounds, 1))\n+ B = np.tile(self.context_coefs[1], (rounds, 1))\n+\n+ base_coef = self.base_coefficient_weight * self.base_coef\n+\n+ coefs = (\n+ base_coef\n+ + A * np.expand_dims((1 - self.base_coefficient_weight) * (1 - weights), 1)\n+ + B * np.expand_dims((1 - self.base_coefficient_weight) * weights, 1)\n+ )\n+\n+ context_coefs.append(coefs)\n@dataclass\n@@ -59,6 +242,10 @@ class SyntheticBanditDataset(BaseBanditDataset):\nFunction defining the delay rounds for each given action-context pair,\nIf None, the `delay_rounds` key will be omitted from the dataset samples.\n+ coef_function: Callable[[np.ndarray, np.ndarray], np.ndarray]], default=sample_random_uniform_coefficients\n+ Function responsible for providing coefficients to the 
reward function. By default coefficients are sampled\n+ as random uniform.\n+\nreward_std: float, default=1.0\nStandard deviation of the reward distribution.\nA larger value leads to a noisier reward distribution.\n@@ -164,6 +351,9 @@ class SyntheticBanditDataset(BaseBanditDataset):\nreward_type: str = RewardType.BINARY.value\nreward_function: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None\ndelay_function: Optional[Callable[[int, float], np.ndarray]] = None\n+ coef_function: Optional[\n+ Callable[[int, float], np.ndarray]\n+ ] = sample_random_uniform_coefficients\nreward_std: float = 1.0\naction_context: Optional[np.ndarray] = None\nbehavior_policy_function: Optional[\n@@ -237,6 +427,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\ncontext=context,\naction_context=self.action_context,\nrandom_state=self.random_state,\n+ coef_function=self.coef_function,\n)\nreturn expected_reward_\n@@ -451,54 +642,10 @@ class SyntheticBanditDataset(BaseBanditDataset):\nreturn np.sum(factual_reward * sampled_actions)\n-def sample_random_uniform_coefficients(\n- effective_dim_action_context: int,\n- effective_dim_context: int,\n- random_: np.random.RandomState\n- ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n- context_coef_ = random_.uniform(-1, 1, size=effective_dim_context)\n- action_coef_ = random_.uniform(-1, 1, size=effective_dim_action_context)\n- context_action_coef_ = random_.uniform(\n- -1, 1, size=(effective_dim_context, effective_dim_action_context)\n- )\n- return context_coef_, action_coef_, context_action_coef_\n-\n-\n-@dataclass()\n-class CoefficientDrifter():\n- drift_interval: int\n- played_rounds: int = 0\n- random_state: int = 12345\n-\n- def __post_init__(self) -> None:\n- if self.random_state is None:\n- raise ValueError(\"`random_state` must be given\")\n- self.random_ = check_random_state(self.random_state)\n-\n- def get_coefficients(\n- self,\n- n_rounds: int,\n- effective_dim_action_context: int,\n- effective_dim_context: int,\n- ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n- # Available_rounds = current_round % drift_interval\n- # While required_rounds > 0:\n- # Calculate available rounds in current coef\n- # tile current coef for available rounds\n- # if available rounds < required_rounds\n- # sample new coef\n- # required_rounds = required_rounds - available_rounds\n- # Stack all coef\n- #\n-\n- return sample_random_uniform_coefficients(effective_dim_action_context, effective_dim_context, self.random_)\n-\n-\n-\ndef logistic_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards.\n@@ -511,7 +658,7 @@ def logistic_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n- coef_func: Callable, default=sample_random_uniform_coefficients\n+ coef_function: Callable, default=sample_random_uniform_coefficients\nFunction for generating the coefficients used for the context, action and context/action interactions.\nBy default, the coefficients are randomly uniformly drawn.\n@@ -530,7 +677,7 @@ def logistic_reward_function(\naction_context=action_context,\ndegree=1,\nrandom_state=random_state,\n- coef_func=coef_func,\n+ coef_function=coef_function,\n)\nreturn sigmoid(logits)\n@@ -539,7 +686,7 @@ def logistic_reward_function(\ndef 
logistic_polynomial_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with polynomial feature transformations.\n@@ -571,7 +718,7 @@ def logistic_polynomial_reward_function(\ncontext=context,\naction_context=action_context,\ndegree=3,\n- coef_func=coef_func,\n+ coef_function=coef_function,\nrandom_state=random_state,\n)\n@@ -581,7 +728,7 @@ def logistic_polynomial_reward_function(\ndef logistic_sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with small effective feature dimension.\n@@ -615,7 +762,7 @@ def logistic_sparse_reward_function(\naction_context=action_context,\ndegree=4,\neffective_dim_ratio=0.3,\n- coef_func=coef_func,\n+ coef_function=coef_function,\nrandom_state=random_state,\n)\n@@ -625,7 +772,7 @@ def logistic_sparse_reward_function(\ndef logistic_sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with small effective feature dimension.\n@@ -659,7 +806,7 @@ def logistic_sparse_reward_function(\naction_context=action_context,\ndegree=4,\neffective_dim_ratio=0.3,\n- coef_func=coef_func,\n+ coef_function=coef_function,\nrandom_state=random_state,\n)\n@@ -669,7 +816,7 @@ def logistic_sparse_reward_function(\ndef linear_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Linear mean reward function for continuous rewards.\n@@ -696,7 +843,7 @@ def linear_reward_function(\ncontext=context,\naction_context=action_context,\ndegree=1,\n- coef_func=coef_func,\n+ coef_function=coef_function,\nrandom_state=random_state,\n)\n@@ -704,7 +851,7 @@ def linear_reward_function(\ndef polynomial_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Polynomial mean reward function for continuous rewards.\n@@ -736,7 +883,7 @@ def polynomial_reward_function(\ncontext=context,\naction_context=action_context,\ndegree=3,\n- coef_func=coef_func,\n+ coef_function=coef_function,\nrandom_state=random_state,\n)\n@@ -744,7 +891,7 @@ def polynomial_reward_function(\ndef sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Sparse mean reward function for continuous rewards.\n@@ -778,7 +925,7 @@ def 
sparse_reward_function(\naction_context=action_context,\ndegree=4,\neffective_dim_ratio=0.3,\n- coef_func=coef_func,\n+ coef_function=coef_function,\nrandom_state=random_state,\n)\n@@ -788,7 +935,7 @@ def _base_reward_function(\naction_context: np.ndarray,\ndegree: int = 3,\neffective_dim_ratio: float = 1.0,\n- coef_func: coef_func_signature = sample_random_uniform_coefficients,\n+ coef_function: coef_func_signature = sample_random_uniform_coefficients,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Base function to define mean reward functions.\n@@ -885,12 +1032,19 @@ def _base_reward_function(\neffective_context_ = context_\neffective_action_context_ = action_context_\n- context_coef_, action_coef_, context_action_coef_ = coef_func(effective_dim_action_context, effective_dim_context,\n- random_)\n+ context_coef_, action_coef_, context_action_coef_ = coef_function(\n+ n_rounds=datasize,\n+ effective_dim_action_context=effective_dim_action_context,\n+ effective_dim_context=effective_dim_context,\n+ random_=random_,\n+ )\n- context_coef_ = np.tile(context_coef_, (datasize, 1))\n- context_values = np.tile(np.sum(effective_context_ * context_coef_, axis=1), (n_actions, 1)).T\n- # context_values = np.tile(effective_context_ @ context_coef_, (n_actions, 1)).T\n+ if context_coef_.shape[0] != datasize:\n+ context_values = np.tile(effective_context_ @ context_coef_, (n_actions, 1)).T\n+ else:\n+ context_values = np.tile(\n+ np.sum(effective_context_ * context_coef_, axis=1), (n_actions, 1)\n+ ).T\naction_values = np.tile(action_coef_ @ effective_action_context_.T, (datasize, 1))\ncontext_action_values = (\neffective_context_ @ context_action_coef_ @ effective_action_context_.T\n"
},
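Putting this entry together, the drifter is meant to be passed to the dataset as its `coef_function`, so expected rewards drift every `drift_interval` rounds, optionally with a transition window (`linear` or `weighted_sampled`), seasonal alternation between two coefficient sets, and a fixed base component weighted by `base_coefficient_weight`. A usage sketch based on the constructor fields above and the tests later in this record (the interval values here are illustrative):

```python
from obp.dataset import SyntheticBanditDataset
from obp.dataset.synthetic import CoefficientDrifter, logistic_sparse_reward_function

# Coefficients are re-sampled every `drift_interval` rounds; the transition
# window blends old and new coefficients, and `base_coefficient_weight` keeps
# part of the reward structure fixed across drifts.
drifter = CoefficientDrifter(
    drift_interval=500,
    transition_period=100,
    transition_type="linear",      # or "weighted_sampled"
    seasonal=False,
    base_coefficient_weight=0.3,
    random_state=12345,
)

dataset = SyntheticBanditDataset(
    n_actions=3,
    dim_context=4,
    reward_type="binary",
    reward_function=logistic_sparse_reward_function,
    coef_function=drifter.get_coefficients,
    random_state=12345,
)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=2000)
```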
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "+from unittest import mock\n+\nimport numpy as np\nimport pytest\nfrom obp.dataset import SyntheticBanditDataset\n-from obp.dataset.synthetic import linear_behavior_policy, ExponentialDelaySampler, _base_reward_function, \\\n- CoefficientDrifter\n+from obp.dataset.synthetic import (\n+ linear_behavior_policy,\n+ ExponentialDelaySampler,\n+ _base_reward_function,\n+ CoefficientDrifter,\n+)\nfrom obp.dataset.synthetic import linear_reward_function\nfrom obp.dataset.synthetic import logistic_polynomial_reward_function\nfrom obp.dataset.synthetic import logistic_reward_function\n@@ -753,87 +759,368 @@ def test_synthetic_behavior_policy_function():\ndef test_base_reward_create_a_matrix_with_expected_rewards_with_identical_expectation_for_identical_rounds_():\n- context = np.asarray([\n+ context = np.asarray(\n+ [\n[1, 2],\n[3, 2],\n[3, 2],\n- ])\n- action_context = np.asarray([\n+ ]\n+ )\n+ action_context = np.asarray(\n+ [\n[1, 0, 0],\n[0, 0, 1],\n[0, 1, 0],\n- ])\n+ ]\n+ )\nactual_expected_rewards = _base_reward_function(\n- context, action_context, degree=5, effective_dim_ratio=1.0, random_state=12345)\n+ context, action_context, degree=5, effective_dim_ratio=1.0, random_state=12345\n+ )\n- expected_expected_rewards = np.asarray([\n+ expected_expected_rewards = np.asarray(\n+ [\n[-4.50475921, -5.60364479, -4.6827207],\n[3.57444414, 7.18370309, -3.36258488],\n- [ 3.57444414, 7.18370309, -3.36258488]])\n+ [3.57444414, 7.18370309, -3.36258488],\n+ ]\n+ )\nassert np.allclose(actual_expected_rewards, expected_expected_rewards)\n+def test_coefficient_tracker_can_shift_expected_rewards_with_syntethic_dataset_generator():\n+ drifter = CoefficientDrifter(drift_interval=3)\n+\n+ dataset = SyntheticBanditDataset(\n+ n_actions=3,\n+ dim_context=4,\n+ reward_type=\"binary\", # \"binary\" or \"continuous\"\n+ reward_function=logistic_sparse_reward_function,\n+ coef_function=drifter.get_coefficients,\n+ random_state=12345,\n+ )\n+\n+ bandit_dataset = dataset.obtain_batch_bandit_feedback(n_rounds=4)\n+\n+\n+class MockCoefSample:\n+ n_samples = 0\n+\n+ def fake_sample(\n+ self, effective_dim_action_context, effective_dim_context, random_, **kwargs\n+ ):\n+ self.n_samples += 1\n+ context_coef_ = self.n_samples * np.ones(effective_dim_context)\n+ action_coef_ = self.n_samples * np.ones(effective_dim_action_context)\n+ context_action_coef_ = self.n_samples * np.ones(\n+ (effective_dim_context, effective_dim_action_context)\n+ )\n+\n+ return context_coef_, action_coef_, context_action_coef_\n+\n+\ndef test_coefficient_tracker_can_shift_expected_rewards_instantly_based_on_configured_intervals():\n- context = np.asarray([\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(drift_interval=3)\n+\n+ context = np.asarray(\n+ [\n[1, 2],\n[3, 2],\n[3, 2],\n[3, 2],\n- ])\n- action_context = np.asarray([\n+ ]\n+ )\n+ action_context = np.asarray(\n+ [\n[1, 0, 0],\n[0, 0, 1],\n[0, 1, 0],\n- ])\n+ ]\n+ )\nactual_expected_rewards = _base_reward_function(\ncontext,\naction_context,\ndegree=5,\neffective_dim_ratio=1.0,\n- # coef_func=TODOCLASS,\n- random_state=12345\n+ coef_function=drifter.get_coefficients,\n+ random_state=12345,\n)\n- expected_expected_rewards = np.asarray([\n- [-4.50475921, -5.60364479, -4.6827207],\n- [ 3.57444414, 7.18370309, -3.36258488],\n- [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n- [ 3.57444414, 7.18370309, -3.36258488]])\n+ 
expected_expected_rewards = np.asarray(\n+ [\n+ [\n+ -8.63608908,\n+ -8.63608908,\n+ -8.63608908,\n+ ], # This round has a different context and should have diff E[r]\n+ [\n+ 2.57393219,\n+ 2.57393219,\n+ 2.57393219,\n+ ], # The next two rounds have the same context and should have identical\n+ [2.57393219, 2.57393219, 2.57393219], # E[r]\n+ [\n+ 3.4882247,\n+ 3.4882247,\n+ 3.4882247,\n+ ], # This round has the same context but has experienced drift.\n+ ]\n+ )\nassert np.allclose(actual_expected_rewards, expected_expected_rewards)\ndef test_coefficient_tracker_can_shift_coefficient_instantly_based_on_configured_interval():\n- drifter = CoefficientDrifter(drift_interval=3)\n-\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\neffective_dim_context = 4\neffective_dim_action_context = 3\n- actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=4, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+ drifter = CoefficientDrifter(\n+ drift_interval=3,\n+ effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n- expected_context_coef = np.asarray([\n- [-4.50475921, -5.60364479, -4.6827207 ],\n- [ 3.57444414, 7.18370309, -3.36258488],\n- [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n- [ 3.57444414, 7.18370309, -3.36258488]]\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=4)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [2, 2, 2, 2],\n+ [2, 2, 2, 2],\n+ [2, 2, 2, 2], # AFTER THIS ROUND, THE COEFS CHANGE ABRUPTLY\n+ [3, 3, 3, 3],\n+ ]\n)\nassert np.allclose(actual_context_coef, expected_context_coef)\n-def test_coefficient_tracker_can_shift_coefficient_multiple_times_instantly_based_on_configured_interval():\n- drifter = CoefficientDrifter(drift_interval=2)\n+def test_coefficient_tracker_can_shift_linearly_instantly_based_on_configured_transition_period():\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=4,\n+ transition_period=2,\n+ effective_dim_context=2,\n+ effective_dim_action_context=2,\n+ )\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=8)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [2.0, 2.0],\n+ [2.0, 2.0], # First two rounds are the same\n+ [2.33333333, 2.33333333], # Next two rounds slowly transition\n+ [2.66666667, 2.66666667],\n+ [3.0, 3.0], # Now we start in the new coef again\n+ [3.0, 3.0],\n+ [3.33333333, 3.33333333], # Now we start transitioning again.\n+ [3.66666667, 3.66666667],\n+ ]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+def test_coefficient_tracker_can_shift_weighted_sampled_based_on_configured_transition_period():\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=4,\n+ transition_period=2,\n+ transition_type=\"weighted_sampled\",\n+ effective_dim_context=2,\n+ effective_dim_action_context=2,\n+ )\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=8)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [2.0, 2.0],\n+ [2.0, 2.0], # First two rounds are the same\n+ [2.0, 2.0], # Next two rounds are weighted sampled\n+ [3.0, 3.0],\n+ [3.0, 3.0], # Now we start in the new coef again\n+ [3.0, 3.0],\n+ [4.0, 
4.0], # Next two rounds are weighted sampled again\n+ [4.0, 4.0],\n+ ]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+\n+def test_coefficient_tracker_can_shift_instantly_back_and_forth_between_seasons_using_seasonality_flag():\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=2,\n+ transition_period=0,\n+ seasonal=True,\n+ effective_dim_context=2,\n+ effective_dim_action_context=2,\n+ )\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=8)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [2.0, 2.0],\n+ [2.0, 2.0],\n+ [3.0, 3.0],\n+ [3.0, 3.0],\n+ [2.0, 2.0],\n+ [2.0, 2.0],\n+ [3.0, 3.0],\n+ [3.0, 3.0],\n+ ]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+\n+def test_coefficient_tracker_can_shift_instantly_under_base_coeficient():\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=2,\n+ transition_period=0,\n+ seasonal=True,\n+ base_coefficient_weight=0.8,\n+ effective_dim_context=2,\n+ effective_dim_action_context=2,\n+ )\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=8)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [1.2, 1.2],\n+ [1.2, 1.2],\n+ [1.4, 1.4],\n+ [1.4, 1.4],\n+ [1.2, 1.2],\n+ [1.2, 1.2],\n+ [1.4, 1.4],\n+ [1.4, 1.4],\n+ ]\n+ )\n+\n+ assert np.allclose(actual_context_coef, expected_context_coef)\n+\n+\n+def test_coefficient_tracker_update_coef_makes_next_coef_current_coef_and_samples_new_next_coef():\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=4, effective_dim_context=1, effective_dim_action_context=1\n+ )\n+\n+ drifter.context_coefs[0] = [1]\n+ drifter.context_coefs[1] = [2]\n+\n+ drifter.update_coef()\n+\n+ assert drifter.context_coefs[0] == [2]\n+ assert np.allclose(drifter.context_coefs[1], [4.0])\n+\n+\n+def test_coefficient_tracker_update_coef_samples_both_new_curr_and_next_on_first_pull():\n+ drifter = CoefficientDrifter(\n+ drift_interval=4,\n+ )\n+\n+ assert len(drifter.context_coefs) == 0\n+\n+ drifter.effective_dim_context = 1\n+ drifter.effective_dim_action_context = 1\n+\n+ drifter.update_coef()\n+\n+ assert drifter.context_coefs[0] is not None\n+ assert drifter.context_coefs[1] is not None\n+ assert not np.allclose(drifter.context_coefs[0], drifter.context_coefs[1])\n+\n+\n+def test_coefficient_tracker_can_set_effective_dim_context_on_first_sample():\neffective_dim_context = 4\neffective_dim_action_context = 3\n- actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=4, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+ drifter = CoefficientDrifter(drift_interval=3)\n- expected_context_coef = np.asarray([\n- [-4.50475921, -5.60364479, -4.6827207 ],\n- [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n- [ 3.57444414, 7.18370309, -3.36258488],\n- [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE AGAIN\n- [ 3.57444414, 7.18370309, -3.36258488],\n+ assert drifter.effective_dim_context == None\n+ assert drifter.effective_dim_action_context == None\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(\n+ n_rounds=4,\n+ 
effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n+\n+ assert drifter.effective_dim_context == 4\n+ assert drifter.effective_dim_action_context == 3\n+\n+\n+def test_coefficient_tracker_raises_when_effective_dimensions_are_being_changed():\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+ drifter = CoefficientDrifter(\n+ drift_interval=3,\n+ effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n+\n+ assert drifter.effective_dim_context == 4\n+ assert drifter.effective_dim_action_context == 3\n+\n+ with pytest.raises(\n+ RuntimeError, match=r\"Trying to change the effective dimensions\"\n+ ):\n+ actual_context_coef, _, _ = drifter.get_coefficients(\n+ n_rounds=4, effective_dim_context=5, effective_dim_action_context=6\n+ )\n+\n+\n+def test_coefficient_tracker_can_shift_coefficient_multiple_times_instantly_based_on_configured_interval():\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=2,\n+ effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=5)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [2.0, 2.0, 2.0, 2.0],\n+ [2.0, 2.0, 2.0, 2.0], # AFTER THIS ROUND, THE COEFS SHOULD CHANGE\n+ [3.0, 3.0, 3.0, 3.0],\n+ [3.0, 3.0, 3.0, 3.0], # AFTER THIS ROUND, THE COEFS SHOULD CHANGE AGAIN\n+ [4.0, 4.0, 4.0, 4.0],\n]\n)\n@@ -841,28 +1128,39 @@ def test_coefficient_tracker_can_shift_coefficient_multiple_times_instantly_base\ndef test_coefficient_tracker_keeps_track_of_shifted_coefficient_based_on_configured_interval_between_batches():\n- drifter = CoefficientDrifter(drift_interval=2)\n-\neffective_dim_context = 4\neffective_dim_action_context = 3\n- actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=3, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n- expected_context_coef = np.asarray([\n- [-4.50475921, -5.60364479, -4.6827207 ],\n- [ 3.57444414, 7.18370309, -3.36258488], # AFTER THIS ROUND, THE OUTCOME SHOULD CHANGE\n- [ 3.57444414, 7.18370309, -3.36258488],]\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=2,\n+ effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n+\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=3)\n+\n+ expected_context_coef = np.asarray(\n+ [\n+ [2.0, 2.0, 2.0, 2.0],\n+ [2.0, 2.0, 2.0, 2.0], # AFTER THIS ROUND, THE COEFS SHOULD CHANGE\n+ [3.0, 3.0, 3.0, 3.0],\n+ ]\n)\nassert np.allclose(actual_context_coef, expected_context_coef)\n- actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=3, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)\n+ actual_context_coef, _, _ = drifter.get_coefficients(n_rounds=3)\n- expected_context_coef_2 = np.asarray([\n- [-4.50475921, -5.60364479, -4.6827207 ], # THIS ROUND SHOULD BE THE SAME AS THE LAST ONE FROM THE PREVIOUS\n- [ 3.57444414, 7.18370309, -3.36258488],\n- [ 3.57444414, 7.18370309, -3.36258488], # HERE THE COEF SHOULD CHANGE AGAIN\n+ 
expected_context_coef_2 = np.asarray(\n+ [\n+ [3.0, 3.0, 3.0, 3.0], # THIS ROUND SHOULD BE THE SAME AS THE LAST ONE\n+ [4.0, 4.0, 4.0, 4.0], # HERE THE COEF SHOULD CHANGE AGAIN\n+ [4.0, 4.0, 4.0, 4.0],\n]\n)\nassert np.allclose(actual_context_coef, expected_context_coef_2)\n-\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | Added transition period, transition style, seasonality and base coef to drift |
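The tests in the record above pin down the intended schedule: coefficients switch every drift_interval rounds, and with a transition_period the last rounds of an interval move linearly from the old coefficient set to the new one. A minimal standalone sketch of such a schedule follows; it is my own helper, not the library's CoefficientDrifter, and only the 2.0 -> 2.333 -> 2.667 progression is taken from the linear-transition test above.

    import numpy as np

    def drift_schedule(coef_a, coef_b, drift_interval, transition_period=0):
        # One coefficient row per round for a single drift cycle: rounds before the
        # transition window keep coef_a, rounds inside it move linearly toward coef_b.
        rows = []
        for t in range(drift_interval):
            steps_left = drift_interval - t
            if steps_left > transition_period:
                w = 0.0
            else:
                w = (transition_period - steps_left + 1) / (transition_period + 1)
            rows.append((1 - w) * np.asarray(coef_a, dtype=float)
                        + w * np.asarray(coef_b, dtype=float))
        return np.vstack(rows)

    # drift_interval=4 with a 2-round transition reproduces the 2.0, 2.0, 2.333..., 2.666...
    # pattern asserted in the linear-transition test in the record above.
    print(drift_schedule([2.0, 2.0], [3.0, 3.0], drift_interval=4, transition_period=2))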
641,008 | 21.07.2022 14:02:24 | -7,200 | a189fe29faf00847ccb0bfd5e7b7c2c71a0f2faf | working state for drift, tests are failing | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -70,13 +70,15 @@ class CoefficientDrifter:\nbase_coefficient_weight: float = 0.0\neffective_dim_action_context: Optional[int] = None\neffective_dim_context: Optional[int] = None\n- played_rounds: int = 0\nrandom_state: int = 12345\n+ played_rounds: int = 0\ncontext_coefs: Optional[deque] = None\n- current_action_coef: Optional[np.ndarray] = None\n- current_action_context_coef: Optional[np.ndarray] = None\n- base_coef: Optional[np.ndarray] = None\n+ action_coefs: Optional[deque] = None\n+ context_action_coefs: Optional[deque] = None\n+ base_context_coef: Optional[np.ndarray] = None\n+ base_action_coef: Optional[np.ndarray] = None\n+ base_context_action_coef: Optional[np.ndarray] = None\ndef __post_init__(self) -> None:\nif self.random_state is None:\n@@ -84,37 +86,28 @@ class CoefficientDrifter:\nself.random_ = check_random_state(self.random_state)\nself.available_rounds = self.drift_interval\nself.context_coefs = deque(maxlen=2)\n+ self.action_coefs = deque(maxlen=2)\n+ self.context_action_coefs = deque(maxlen=2)\nif self.effective_dim_action_context and self.effective_dim_context:\nself.update_coef()\ndef update_coef(self) -> None:\n- if self.base_coef is None:\n- self.base_coef, _, _ = sample_random_uniform_coefficients(\n+ if self.base_context_coef is None:\n+ self.base_context_coef, self.base_action_coef, self.base_context_action_coef = sample_random_uniform_coefficients(\nself.effective_dim_action_context,\nself.effective_dim_context,\nself.random_,\n)\nif len(self.context_coefs) == 0:\n- (\n- tmp_context_coef,\n- tmp_action_coef,\n- tmp_action_context_coef,\n- ) = sample_random_uniform_coefficients(\n- self.effective_dim_action_context,\n- self.effective_dim_context,\n- self.random_,\n- )\n- self.context_coefs.append(tmp_context_coef)\n-\n- # We only drift on the context_coef for now.\n- if self.current_action_coef is None:\n- self.current_action_coef = tmp_action_coef\n- if self.current_action_context_coef is None:\n- self.current_action_context_coef = tmp_action_context_coef\n+ self.context_coefs.append(self.base_context_coef)\n+ self.action_coefs.append(self.base_action_coef)\n+ self.context_action_coefs.append(self.base_context_action_coef)\nif self.seasonal and len(self.context_coefs) == 2:\nself.context_coefs.rotate()\n+ self.action_coefs.rotate()\n+ self.context_action_coefs.rotate()\nelse:\n(\ntmp_context_coef,\n@@ -126,6 +119,8 @@ class CoefficientDrifter:\nself.random_,\n)\nself.context_coefs.append(tmp_context_coef)\n+ self.action_coefs.append(tmp_action_coef)\n+ self.context_action_coefs.append(tmp_action_context_coef)\ndef get_coefficients(\nself,\n@@ -153,27 +148,28 @@ class CoefficientDrifter:\nrequired_rounds = n_rounds\ncontext_coefs = []\n+ action_coefs = []\n+ context_action_coefs = []\nwhile required_rounds > 0:\nif required_rounds >= self.available_rounds:\n- self.append_current_coefs(context_coefs, rounds=self.available_rounds)\n+ self.append_current_coefs(context_coefs, action_coefs, context_action_coefs, rounds=self.available_rounds)\nrequired_rounds -= self.available_rounds\nself.update_coef()\nself.available_rounds = self.drift_interval\nelse:\n- self.append_current_coefs(context_coefs, rounds=required_rounds)\n+ self.append_current_coefs(context_coefs, action_coefs, context_action_coefs, rounds=required_rounds)\nself.available_rounds -= required_rounds\nrequired_rounds = 0\n- # For now, we only drift on the context coefs, rest is static\nreturn (\nnp.vstack(context_coefs),\n- self.current_action_coef,\n- self.current_action_context_coef,\n+ 
np.vstack(action_coefs),\n+ np.vstack(context_action_coefs),\n)\ndef append_current_coefs(\n- self, context_coefs: List[np.ndarray], rounds: int\n+ self, context_coefs: List[np.ndarray], action_coefs: List[np.ndarray], context_action_coefs: List[np.ndarray], rounds: int\n) -> None:\nshift_start = self.available_rounds - self.transition_period\n@@ -192,18 +188,22 @@ class CoefficientDrifter:\nif self.transition_type is \"weighted_sampled\":\nweights = self.random_.binomial(n=1, p=weights)\n- A = np.tile(self.context_coefs[0], (rounds, 1))\n- B = np.tile(self.context_coefs[1], (rounds, 1))\n+ context_coefs.append(self.compute_weighted_coefs(self.context_coefs, self.base_context_coef, rounds, weights))\n+ action_coefs.append(self.compute_weighted_coefs(self.action_coefs, self.base_action_coef, rounds, weights))\n+ context_action_coefs.append(self.compute_weighted_coefs(self.context_action_coefs, self.base_context_action_coef, rounds, weights))\n+\n- base_coef = self.base_coefficient_weight * self.base_coef\n+ def compute_weighted_coefs(self, coefs, base_coef, rounds, weights):\n+ base_coef = self.base_coefficient_weight * base_coef\n+ A = np.tile(coefs[0], [rounds] + [1 for _ in coefs[0].shape])\n+ B = np.tile(coefs[1], [rounds] + [1 for _ in coefs[1].shape])\ncoefs = (\nbase_coef\n- + A * np.expand_dims((1 - self.base_coefficient_weight) * (1 - weights), 1)\n- + B * np.expand_dims((1 - self.base_coefficient_weight) * weights, 1)\n+ + A * np.expand_dims((1 - self.base_coefficient_weight) * (1 - weights), list(range(1, len(A.shape))))\n+ + B * np.expand_dims((1 - self.base_coefficient_weight) * weights, list(range(1, len(B.shape))))\n)\n-\n- context_coefs.append(coefs)\n+ return coefs\n@dataclass\n@@ -1045,15 +1045,28 @@ def _base_reward_function(\ncontext_values = np.tile(\nnp.sum(effective_context_ * context_coef_, axis=1), (n_actions, 1)\n).T\n- action_values = np.tile(action_coef_ @ effective_action_context_.T, (datasize, 1))\n+\n+ action_values = action_coef_ @ effective_action_context_.T\n+ if action_coef_.shape[0] != datasize:\n+ action_values = np.tile(action_values, (datasize, 1))\n+\n+ if action_coef_.shape[0] != datasize:\ncontext_action_values = (\neffective_context_ @ context_action_coef_ @ effective_action_context_.T\n)\n- expected_rewards = context_values + action_values + context_action_values\n- expected_rewards = (\n- degree * (expected_rewards - expected_rewards.mean()) / expected_rewards.std()\n+ else:\n+ effective_context_ = np.expand_dims(effective_context_, axis=1)\n+ context_action_coef_interactions = np.squeeze(np.matmul(effective_context_, context_action_coef_), axis=1)\n+\n+ context_action_values = (\n+ context_action_coef_interactions @ effective_action_context_.T\n)\n+ expected_rewards = context_values + action_values + context_action_values\n+ # expected_rewards = (\n+ # degree * (expected_rewards - expected_rewards.mean()) / expected_rewards.std()\n+ # )\n+\nreturn expected_rewards\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -852,22 +852,10 @@ def test_coefficient_tracker_can_shift_expected_rewards_instantly_based_on_confi\nexpected_expected_rewards = np.asarray(\n[\n- [\n- -8.63608908,\n- -8.63608908,\n- -8.63608908,\n- ], # This round has a different context and should have diff E[r]\n- [\n- 2.57393219,\n- 2.57393219,\n- 2.57393219,\n- ], # The next two rounds have the same context and should have identical\n- [2.57393219, 2.57393219, 2.57393219], # E[r]\n- [\n- 3.4882247,\n- 3.4882247,\n- 3.4882247,\n- ], # This round has the same context but has experienced drift.\n+ [-7.88993004, -7.88993004, -7.88993004], # This round has a different context and should have diff E[r]\n+ [ 0.9467916 , 0.9467916 , 0.9467916 ], # The next two rounds have the same context and should have identical\n+ [ 0.9467916 , 0.9467916 , 0.9467916 ], # E[r]\n+ [ 5.99634683, 5.99634683, 5.99634683], # This round has the same context but has experienced drift.\n]\n)\n@@ -1164,3 +1152,64 @@ def test_coefficient_tracker_keeps_track_of_shifted_coefficient_based_on_configu\n)\nassert np.allclose(actual_context_coef, expected_context_coef_2)\n+\n+\n+def test_coefficients_can_drift_for_the_action_coefs():\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=2,\n+ effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n+\n+ _, actual_action_coef, _ = drifter.get_coefficients(n_rounds=3)\n+\n+ expected_action_coef = np.asarray(\n+ [\n+ [2.0, 2.0, 2.0],\n+ [2.0, 2.0, 2.0], # AFTER THIS ROUND, THE COEFS SHOULD CHANGE\n+ [3.0, 3.0, 3.0],\n+ ]\n+ )\n+\n+ assert np.allclose(actual_action_coef, expected_action_coef)\n+\n+\n+\n+def test_coefficients_can_drift_for_the_action_coefs():\n+ effective_dim_context = 4\n+ effective_dim_action_context = 3\n+\n+ with mock.patch(\n+ \"obp.dataset.synthetic.sample_random_uniform_coefficients\",\n+ MockCoefSample().fake_sample,\n+ ):\n+ drifter = CoefficientDrifter(\n+ drift_interval=2,\n+ effective_dim_context=effective_dim_context,\n+ effective_dim_action_context=effective_dim_action_context,\n+ )\n+\n+ _, _, actual_context_action_coef = drifter.get_coefficients(n_rounds=3)\n+\n+ expected_context_action_coef = np.asarray([[[2., 2., 2.],\n+ [2., 2., 2.],\n+ [2., 2., 2.],\n+ [2., 2., 2.]],\n+ [[2., 2., 2.],\n+ [2., 2., 2.],\n+ [2., 2., 2.],\n+ [2., 2., 2.]],\n+ [[3., 3., 3.],\n+ [3., 3., 3.],\n+ [3., 3., 3.],\n+ [3., 3., 3.]]])\n+\n+ assert np.allclose(actual_context_action_coef, expected_context_action_coef)\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "@@ -7,7 +7,8 @@ from obp.policy import BaseContextFreePolicy\nfrom obp.policy.linear import LinTS, LinUCB\nfrom obp.policy.contextfree import EpsilonGreedy, Random, BernoulliTS\n-from obp.dataset.synthetic import logistic_reward_function, ExponentialDelaySampler\n+from obp.dataset.synthetic import logistic_reward_function, ExponentialDelaySampler, CoefficientDrifter, \\\n+ logistic_sparse_reward_function\nfrom obp.dataset import SyntheticBanditDataset\nfrom obp.policy.policy_type import PolicyType\nfrom obp.simulator import run_bandit_simulation\n@@ -223,3 +224,76 @@ def test_run_bandit_simulation_applies_policy_directly_when_no_delay():\n]\nassert tracker.parameter_updates == expected_updates\n+\n+\n+def test_simulator_can_create_identical_simulations_using_seeds():\n+ sample_n_rounds = 40\n+ drift_interval = 20\n+ transition_period = 0\n+ n_actions = 3\n+ dim_context = 5\n+\n+ drifter = CoefficientDrifter(\n+ drift_interval=drift_interval,\n+ transition_period=transition_period,\n+ transition_type=\"linear\", # linear or weighted_sampled\n+ seasonal=False,\n+ base_coefficient_weight=.3,\n+ random_state=1234\n+ )\n+\n+ dataset_1 = SyntheticBanditDataset(\n+ n_actions=n_actions,\n+ dim_context=dim_context,\n+ reward_type=\"binary\", # \"binary\" or \"continuous\"\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=None,\n+ coef_function=drifter.get_coefficients,\n+ behavior_policy_function=None, # uniformly random\n+ random_state=12345,\n+ )\n+\n+ training_bandit_dataset_1 = dataset_1.obtain_batch_bandit_feedback(n_rounds=sample_n_rounds)\n+\n+ policy = EpsilonGreedy(n_actions=n_actions, epsilon=0.1, random_state=12345)\n+ train_action_dists_1 = run_bandit_simulation(\n+ bandit_feedback=training_bandit_dataset_1,\n+ policy=policy\n+ )\n+\n+ rewards_1 = dataset_1.count_ground_truth_policy_rewards(train_action_dists_1.squeeze(axis=2)[:drift_interval],\n+ training_bandit_dataset_1[\"factual_reward\"][\n+ :drift_interval])\n+ drifter = CoefficientDrifter(\n+ drift_interval=drift_interval,\n+ transition_period=transition_period,\n+ transition_type=\"linear\", # linear or weighted_sampled\n+ seasonal=False,\n+ base_coefficient_weight=1.0,\n+ random_state=1234\n+ )\n+\n+\n+ dataset_2 = SyntheticBanditDataset(\n+ n_actions=n_actions,\n+ dim_context=dim_context,\n+ reward_type=\"binary\", # \"binary\" or \"continuous\"\n+ reward_function=logistic_sparse_reward_function,\n+ delay_function=None,\n+ coef_function=drifter.get_coefficients,\n+ behavior_policy_function=None, # uniformly random\n+ random_state=12345,\n+ )\n+\n+ training_bandit_dataset_2 = dataset_2.obtain_batch_bandit_feedback(n_rounds=sample_n_rounds)\n+\n+ policy = EpsilonGreedy(n_actions=n_actions, epsilon=0.1, random_state=12345)\n+ train_action_dists_2 = run_bandit_simulation(\n+ bandit_feedback=training_bandit_dataset_2,\n+ policy=policy\n+ )\n+\n+ rewards_2 = dataset_2.count_ground_truth_policy_rewards(train_action_dists_2.squeeze(axis=2)[:drift_interval],\n+ training_bandit_dataset_2[\"factual_reward\"][:drift_interval])\n+\n+ assert rewards_1 == rewards_2\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | working state for drift, tests are failing |
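The compute_weighted_coefs change in the diff above applies the same per-round blend to the context, action, and context-action coefficients, mixing in a fixed base component weighted by base_coefficient_weight. Below is a rough numpy sketch of that blend, written so the weight axis broadcasts over either 1-D or 2-D coefficient shapes; the function and argument names are illustrative, not the obp internals.

    import numpy as np

    def blend_coefs(curr, nxt, base, weights, base_weight=0.0):
        # Per-round blend of the current and next coefficient sets plus a fixed base
        # component; the weight axis is broadcast over the trailing dims so the same
        # code handles 1-D context coefs and 2-D context-action coefs.
        curr = np.asarray(curr, dtype=float)
        nxt = np.asarray(nxt, dtype=float)
        base = np.asarray(base, dtype=float)
        w = np.asarray(weights, dtype=float).reshape((-1,) + (1,) * curr.ndim)
        a = np.broadcast_to(curr, (len(weights),) + curr.shape)
        b = np.broadcast_to(nxt, (len(weights),) + nxt.shape)
        return base_weight * base + (1 - base_weight) * ((1 - w) * a + w * b)

    # 1-D context coefficients
    print(blend_coefs(curr=[2.0, 2.0], nxt=[3.0, 3.0], base=[1.0, 1.0],
                      weights=[0.0, 0.5, 1.0], base_weight=0.2))
    # 2-D context-action coefficients blend with the same code
    print(blend_coefs(curr=np.full((2, 3), 2.0), nxt=np.full((2, 3), 3.0),
                      base=np.ones((2, 3)), weights=[0.0, 1.0]).shape)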
641,008 | 26.07.2022 15:41:35 | -7,200 | 600d7af8890356a9729b85dc97584b884395ce93 | speed up simulation by not doing constant vstacks | [
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "@@ -377,12 +377,23 @@ class BanditPolicySimulator:\nfor _ in tqdm(range(n_rounds)):\nself.step()\nif batch_bandit_rounds:\n+ start_round = self.rounds_played\n+ try:\n+ # Append context and ground truth before executing rounds for efficiency reasons\nself.append_contexts(batch_bandit_rounds.context)\nself.append_ground_truth_rewards(batch_bandit_rounds.rewards)\nfor bandit_round in tqdm(batch_bandit_rounds):\nself.current_round = bandit_round\nself._step()\n+ except Exception as e:\n+ # If anything goes wrong, we want to remove all contexts and rewards that have not yet been shown yet\n+ total_rounds = batch_bandit_rounds.context.shape[0]\n+ remaining_rounds = total_rounds - (self.rounds_played - start_round)\n+ self.contexts = self.contexts[:-remaining_rounds,]\n+ self.ground_truth_rewards = self.ground_truth_rewards[:-remaining_rounds,]\n+ raise e\n+\ndef delayed_update_policy(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "@@ -478,6 +478,65 @@ def test_bandit_policy_simulator_do_simulation_over_batch_data():\nassert simulator.rounds_played == 5\n+def test_bandit_policy_simulator_cleans_up_when_simulation_is_interupted():\n+ env = BanditEnvironmentSimulator(\n+ n_actions=3,\n+ dim_context=4,\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+\n+ class CrashingBanditPolicySimulator(BanditPolicySimulator):\n+ def _step(self):\n+ super()._step()\n+ if self.rounds_played == 50:\n+ raise RuntimeError\n+\n+ simulator = CrashingBanditPolicySimulator(\n+ policy=EpsilonGreedy(n_actions=3, epsilon=0.1, random_state=12345),\n+ environment=env,\n+ )\n+\n+ with pytest.raises(RuntimeError):\n+ simulator.steps(batch_bandit_rounds=env.next_bandit_round_batch(100))\n+\n+ assert simulator.rounds_played == 50\n+ assert simulator.contexts.shape == (50, 4)\n+ assert simulator.ground_truth_rewards.shape == (50, 3)\n+\n+\n+def test_bandit_policy_simulator_cleans_up_keeping_previous_rounds_when_simulation_is_interupted():\n+ env = BanditEnvironmentSimulator(\n+ n_actions=3,\n+ dim_context=4,\n+ reward_function=logistic_reward_function,\n+ random_state=12345,\n+ )\n+\n+ class CrashingBanditPolicySimulator(BanditPolicySimulator):\n+ def _step(self):\n+ super()._step()\n+ if self.rounds_played == 5:\n+ raise RuntimeError\n+\n+ simulator = CrashingBanditPolicySimulator(\n+ policy=EpsilonGreedy(n_actions=3, epsilon=0.1, random_state=12345),\n+ environment=env,\n+ )\n+\n+ batch_1 = env.next_bandit_round_batch(2)\n+ simulator.steps(batch_bandit_rounds=batch_1)\n+\n+ batch_2 = env.next_bandit_round_batch(6)\n+ with pytest.raises(RuntimeError):\n+ simulator.steps(batch_bandit_rounds=batch_2)\n+\n+ assert simulator.rounds_played == 5\n+ assert np.allclose(simulator.contexts, np.vstack((batch_1.context, batch_2.context))[:5,])\n+ assert np.allclose(simulator.ground_truth_rewards, np.vstack((batch_1.rewards, batch_2.rewards))[:5,])\n+\n+\n+\ndef test_ipw_can_be_learned_from_logged_data_generated_by_simulation():\nfrom sklearn.ensemble import RandomForestClassifier as RandomForest\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | speed up simulation by not doing constant vstacks |
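The simulator change above registers a whole batch of contexts and ground-truth rewards up front (instead of stacking per round) and, if a round raises, trims the rows that were never actually played, which is what the CrashingBanditPolicySimulator tests check. A small self-contained version of that bookkeeping, with hypothetical names rather than the obp API:

    import numpy as np

    class BatchLog:
        # Register a whole batch's contexts/rewards once, play the rounds, and trim
        # the unplayed tail if a round raises, so the log only contains rounds that
        # were actually shown.
        def __init__(self, dim_context, n_actions):
            self.contexts = np.empty((0, dim_context))
            self.rewards = np.empty((0, n_actions))
            self.rounds_played = 0

        def play_batch(self, batch_contexts, batch_rewards, play_round):
            start = self.rounds_played
            self.contexts = np.vstack((self.contexts, batch_contexts))
            self.rewards = np.vstack((self.rewards, batch_rewards))
            try:
                for ctx, rew in zip(batch_contexts, batch_rewards):
                    play_round(ctx, rew)   # hypothetical callback that plays one round
                    self.rounds_played += 1
            except Exception:
                remaining = len(batch_contexts) - (self.rounds_played - start)
                self.contexts = self.contexts[:-remaining]
                self.rewards = self.rewards[:-remaining]
                raise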
641,008 | 26.07.2022 17:01:56 | -7,200 | 74e9a9f92c397d866e48af995923ba179bdec91b | remove accidental code duplication and style changes | [
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "@@ -390,12 +390,14 @@ class BanditPolicySimulator:\n# If anything goes wrong, we want to remove all contexts and rewards that have not yet been shown yet\ntotal_rounds = batch_bandit_rounds.context.shape[0]\nremaining_rounds = total_rounds - (self.rounds_played - start_round)\n- self.contexts = self.contexts[:-remaining_rounds,]\n- self.ground_truth_rewards = self.ground_truth_rewards[:-remaining_rounds,]\n+ self.contexts = self.contexts[\n+ :-remaining_rounds,\n+ ]\n+ self.ground_truth_rewards = self.ground_truth_rewards[\n+ :-remaining_rounds,\n+ ]\nraise e\n-\n-\ndef delayed_update_policy(\nself, available_rounds: List[int], current_round: int\n) -> None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_delay_sampler.py",
"new_path": "tests/simulator/test_delay_sampler.py",
"diff": "@@ -41,11 +41,13 @@ def test_synthetic_sample_results_in_sampled_delay_with_weighted_delays_per_arm(\nactual_bandits_dataset = dataset.next_bandit_round_batch(n_rounds=5)\nexpected_round_delays = np.asarray(\n- [[35., 38., 4.],\n- [3., 84., 17.],\n- [44., 106., 26.],\n- [14., 138., 61.],\n- [1., 12., 7.]]\n+ [\n+ [35.0, 38.0, 4.0],\n+ [3.0, 84.0, 17.0],\n+ [44.0, 106.0, 26.0],\n+ [14.0, 138.0, 61.0],\n+ [1.0, 12.0, 7.0],\n+ ]\n)\nassert (actual_bandits_dataset.round_delays == expected_round_delays).all()\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "@@ -532,9 +532,18 @@ def test_bandit_policy_simulator_cleans_up_keeping_previous_rounds_when_simulati\nsimulator.steps(batch_bandit_rounds=batch_2)\nassert simulator.rounds_played == 5\n- assert np.allclose(simulator.contexts, np.vstack((batch_1.context, batch_2.context))[:5,])\n- assert np.allclose(simulator.ground_truth_rewards, np.vstack((batch_1.rewards, batch_2.rewards))[:5,])\n-\n+ assert np.allclose(\n+ simulator.contexts,\n+ np.vstack((batch_1.context, batch_2.context))[\n+ :5,\n+ ],\n+ )\n+ assert np.allclose(\n+ simulator.ground_truth_rewards,\n+ np.vstack((batch_1.rewards, batch_2.rewards))[\n+ :5,\n+ ],\n+ )\ndef test_ipw_can_be_learned_from_logged_data_generated_by_simulation():\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | remove accidental code duplication and style changes |
641,008 | 01.08.2022 11:25:15 | -7,200 | ff54d195f8c155bae537807bb2bbad742fc7aa38 | speed up simulation by using python lists instead of constant numpy appends | [
{
"change_type": "MODIFY",
"old_path": "obp/simulator/simulator.py",
"new_path": "obp/simulator/simulator.py",
"diff": "@@ -50,7 +50,7 @@ class BanditRounds:\nround_delays: np.ndarray\ndef _get_bandit_round(self) -> BanditRound:\n- if np.any(self.round_delays):\n+ if self.round_delays is not None:\nround_delays = self.round_delays[self.idx]\nelse:\nround_delays = None\n@@ -293,17 +293,35 @@ class BanditPolicySimulator:\nreward_round_lookup: defaultdict = None\n# To keep track of for after\n- selected_actions: np.ndarray = None\n- obtained_rewards: np.ndarray = None\n- ground_truth_rewards: np.ndarray = None\n- contexts: np.ndarray = None\n+ _selected_actions: List[int] = None\n+ _obtained_rewards: List[int] = None\n+ _ground_truth_rewards: List[np.ndarray] = None\n+ _contexts: List[np.ndarray] = None\ntotal_reward: int = 0\nrounds_played: int = 0\ncurrent_round: BanditRound = None\n+ @property\n+ def selected_actions(self) -> np.ndarray:\n+ return np.asarray(self._selected_actions)\n+\n+ @property\n+ def obtained_rewards(self) -> np.ndarray:\n+ return np.asarray(self._obtained_rewards)\n+\n+ @property\n+ def ground_truth_rewards(self) -> np.ndarray:\n+ return np.vstack(self._ground_truth_rewards)\n+\n+ @property\n+ def contexts(self) -> np.ndarray:\n+ return np.vstack(self._contexts)\n+\ndef __post_init__(self):\n- self.selected_actions = np.empty(0, int)\n- self.obtained_rewards = np.empty(0, int)\n+ self._selected_actions = []\n+ self._obtained_rewards = []\n+ self._ground_truth_rewards = []\n+ self._contexts = []\nself.reward_round_lookup = defaultdict(list)\ndef start_next_bandit_round(self, bandit_round: BanditRound = None) -> None:\n@@ -316,18 +334,10 @@ class BanditPolicySimulator:\nself.append_ground_truth_rewards(self.current_round.rewards)\ndef append_ground_truth_rewards(self, ground_truth_rewards):\n- if self.ground_truth_rewards is None:\n- self.ground_truth_rewards = ground_truth_rewards\n- else:\n- self.ground_truth_rewards = np.vstack(\n- (self.ground_truth_rewards, ground_truth_rewards)\n- )\n+ self._ground_truth_rewards.append(ground_truth_rewards)\ndef append_contexts(self, context):\n- if self.contexts is None:\n- self.contexts = context\n- else:\n- self.contexts = np.vstack((self.contexts, context))\n+ self._contexts.append(context)\ndef step(self, bandit_round: BanditRound = None):\nself.start_next_bandit_round(bandit_round)\n@@ -335,10 +345,10 @@ class BanditPolicySimulator:\ndef _step(self):\nselected_action = self.select_action()\n- self.selected_actions = np.append(self.selected_actions, selected_action)\n+ self._selected_actions.append(selected_action)\nreward_ = self.current_round.rewards[selected_action]\n- self.obtained_rewards = np.append(self.obtained_rewards, reward_)\n+ self._obtained_rewards.append(reward_)\nself.total_reward += reward_\ndelays = self.current_round.round_delays\n@@ -377,34 +387,16 @@ class BanditPolicySimulator:\nfor _ in tqdm(range(n_rounds)):\nself.step()\nif batch_bandit_rounds:\n- start_round = self.rounds_played\n- try:\n- # Append context and ground truth before executing rounds for efficiency reasons\n- self.append_contexts(batch_bandit_rounds.context)\n- self.append_ground_truth_rewards(batch_bandit_rounds.rewards)\n-\nfor bandit_round in tqdm(batch_bandit_rounds):\n- self.current_round = bandit_round\n- self._step()\n- except Exception as e:\n- # If anything goes wrong, we want to remove all contexts and rewards that have not yet been shown yet\n- total_rounds = batch_bandit_rounds.context.shape[0]\n- remaining_rounds = total_rounds - (self.rounds_played - start_round)\n- self.contexts = self.contexts[\n- :-remaining_rounds,\n- ]\n- 
self.ground_truth_rewards = self.ground_truth_rewards[\n- :-remaining_rounds,\n- ]\n- raise e\n+ self.step(bandit_round)\ndef delayed_update_policy(\nself, available_rounds: List[int], current_round: int\n) -> None:\nfor available_round_idx in available_rounds:\n- available_action = self.selected_actions[available_round_idx]\n- available_context = self.contexts[available_round_idx]\n- available_rewards = self.obtained_rewards[available_round_idx]\n+ available_action = self._selected_actions[available_round_idx]\n+ available_context = self._contexts[available_round_idx]\n+ available_rewards = self._obtained_rewards[available_round_idx]\nself.update_policy(available_context, available_action, available_rewards)\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | speed up simulation by using python lists instead of constant numpy appends |
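The change above swaps per-round numpy appends for plain Python lists that are stacked once when read (the selected_actions / contexts properties in the diff). The reason is asymptotic: np.append and np.vstack copy the whole accumulated array on every call, while list.append is amortized constant time. A quick self-contained comparison of the two patterns (timings will vary by machine):

    import time
    import numpy as np

    n, dim = 5000, 4
    rows = [np.random.rand(dim) for _ in range(n)]

    t0 = time.perf_counter()
    acc = np.empty((0, dim))
    for r in rows:
        acc = np.vstack((acc, r))      # copies the whole array every iteration: O(n^2) overall
    t_vstack = time.perf_counter() - t0

    t0 = time.perf_counter()
    buf = []
    for r in rows:
        buf.append(r)                  # amortized O(1) per append
    stacked = np.vstack(buf)           # one materialization at the end
    t_list = time.perf_counter() - t0

    assert np.allclose(acc, stacked)
    print(f"per-round vstack: {t_vstack:.3f}s  vs  list + single vstack: {t_list:.3f}s")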
641,008 | 11.08.2022 13:43:45 | -7,200 | 969b60630a96ee3213b03d5c382102f3b98ba93b | optional z-score | [
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -113,7 +113,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\n.. code-block:: python\n- >>> from obp.dataset import (\n+ >>> from obp.env import (\nSyntheticBanditDataset,\nlogistic_reward_function\n)\n@@ -418,6 +418,7 @@ def logistic_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards.\n@@ -459,6 +460,7 @@ def logistic_polynomial_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with polynomial feature transformations.\n@@ -501,6 +503,7 @@ def logistic_sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Logistic mean reward function for binary rewards with small effective feature dimension.\n@@ -535,6 +538,7 @@ def logistic_sparse_reward_function(\ndegree=4,\neffective_dim_ratio=0.3,\ncoef_function=coef_function,\n+ z_score=z_score,\nrandom_state=random_state,\n)\n@@ -545,6 +549,7 @@ def linear_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Linear mean reward function for continuous rewards.\n@@ -572,6 +577,7 @@ def linear_reward_function(\naction_context=action_context,\ndegree=1,\ncoef_function=coef_function,\n+ z_score=z_score,\nrandom_state=random_state,\n)\n@@ -580,6 +586,7 @@ def polynomial_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Polynomial mean reward function for continuous rewards.\n@@ -612,6 +619,7 @@ def polynomial_reward_function(\naction_context=action_context,\ndegree=3,\ncoef_function=coef_function,\n+ z_score=z_score,\nrandom_state=random_state,\n)\n@@ -620,6 +628,7 @@ def sparse_reward_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Sparse mean reward function for continuous rewards.\n@@ -654,6 +663,7 @@ def sparse_reward_function(\ndegree=4,\neffective_dim_ratio=0.3,\ncoef_function=coef_function,\n+ z_score=z_score,\nrandom_state=random_state,\n)\n@@ -664,6 +674,7 @@ def _base_reward_function(\ndegree: int = 3,\neffective_dim_ratio: float = 1.0,\ncoef_function: coef_func_signature = sample_random_uniform_coefficients,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Base function to define mean reward functions.\n@@ -714,6 +725,10 @@ def _base_reward_function(\nonly `dim_context * effective_dim_ratio` fraction of randomly selected dimensions\nwill be used as relevant dimensions to generate expected rewards.\n+ z_score: boolean, default=True\n+ Boolean to enable/disable the calculation of a z-score over the resulting rewards. 
In case the environment\n+ is stationary, this can be turned on. But when the\n+\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -793,9 +808,10 @@ def _base_reward_function(\n)\nexpected_rewards = context_values + action_values + context_action_values\n- expected_rewards = (\n- degree * (expected_rewards - expected_rewards.mean()) / expected_rewards.std()\n- )\n+ if z_score:\n+ expected_rewards = expected_rewards - expected_rewards.mean() / expected_rewards.std()\n+\n+ expected_rewards = degree * expected_rewards\nreturn expected_rewards\n@@ -875,6 +891,7 @@ def _base_behavior_policy_function(\ncontext: np.ndarray,\naction_context: np.ndarray,\ndegree: int = 3,\n+ z_score: bool = True,\nrandom_state: Optional[int] = None,\n) -> np.ndarray:\n\"\"\"Base function to define behavior policy functions.\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | optional z-score |
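The diff above adds a z_score flag to the reward functions; its (truncated) docstring suggests the standardization is reasonable for a stationary environment but should be optional once rewards drift. The sketch below shows the toggle in isolation and is not the obp function itself. One detail worth keeping in mind when writing the standardization inline is operator precedence: x - x.mean() / x.std() divides only the mean, so the difference needs its own parentheses.

    import numpy as np

    def scale_rewards(expected_rewards, degree=1, z_score=True):
        # Optionally standardize the expected-reward matrix before scaling by degree.
        r = np.asarray(expected_rewards, dtype=float)
        if z_score:
            r = (r - r.mean()) / r.std()   # parentheses matter: r - r.mean() / r.std() is a different quantity
        return degree * r

    r = np.array([[1.0, 2.0], [3.0, 4.0]])
    print(scale_rewards(r))                  # zero mean, unit std
    print(scale_rewards(r, z_score=False))   # raw values, e.g. when rewards drift over time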
641,008 | 18.09.2022 15:29:24 | 25,200 | ad015d18b99214e29f950ab140233b77e36af04e | addressed comments related to naming and addition of argument docstring for the coefficient drifter | [
{
"change_type": "RENAME",
"old_path": "examples/quickstart/online-vs-ope-simulation.ipynb",
"new_path": "examples/quickstart/online-bandit-vs-opl-simulation.ipynb",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "examples/quickstart/delay-simulation.ipynb",
"new_path": "examples/quickstart/online-bandit-with-delay-simulation.ipynb",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "examples/quickstart/drift-simulation.ipynb",
"new_path": "examples/quickstart/online-bandit-with-drift-simulation.ipynb",
"diff": ""
},
{
"change_type": "MODIFY",
"old_path": "obp/dataset/synthetic.py",
"new_path": "obp/dataset/synthetic.py",
"diff": "@@ -113,7 +113,7 @@ class SyntheticBanditDataset(BaseBanditDataset):\n.. code-block:: python\n- >>> from obp.env import (\n+ >>> from obp.dataset import (\nSyntheticBanditDataset,\nlogistic_reward_function\n)\n@@ -431,6 +431,9 @@ def logistic_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n+ z_score: boolean, default=True\n+ Controls whether a z-score will be calculated over the computed logits.\n+\ncoef_function: Callable, default=sample_random_uniform_coefficients\nFunction for generating the coefficients used for the context, action and context/action interactions.\nBy default, the coefficients are randomly uniformly drawn.\n@@ -450,6 +453,7 @@ def logistic_reward_function(\naction_context=action_context,\ndegree=1,\nrandom_state=random_state,\n+ z_score=z_score,\ncoef_function=coef_function,\n)\n@@ -478,6 +482,9 @@ def logistic_polynomial_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n+ z_score: boolean, default=True\n+ Controls whether a z-score will be calculated over the computed logits.\n+\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -493,6 +500,7 @@ def logistic_polynomial_reward_function(\naction_context=action_context,\ndegree=3,\ncoef_function=coef_function,\n+ z_score=z_score,\nrandom_state=random_state,\n)\n@@ -522,6 +530,9 @@ def logistic_sparse_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n+ z_score: boolean, default=True\n+ Controls whether a z-score will be calculated over the computed logits.\n+\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n@@ -562,6 +573,9 @@ def linear_reward_function(\naction_context: array-like, shape (n_actions, dim_action_context)\nVector representation of actions.\n+ z_score: boolean, default=True\n+ Controls whether a z-score will be calculated over the computed logits.\n+\nrandom_state: int, default=None\nControls the random seed in sampling dataset.\n"
},
{
"change_type": "MODIFY",
"old_path": "obp/simulator/coefficient_drifter.py",
"new_path": "obp/simulator/coefficient_drifter.py",
"diff": "@@ -22,6 +22,33 @@ class CoefficientDrifter:\nParameters\n-----------\n+ drift_interval: int\n+ Controls interval of steps at which the coefficients are updated.\n+\n+ transition_period: int, default=0\n+ Controls the period in which the coefficients are transitioning between new and old. The transition period\n+ always happened before the drift interval. Meaning, that if the drift interval is 5000 and the transition period\n+ 500, the transition will be between step 4500 and step 5000.\n+\n+ transition_type: str, default=\"linear\"\n+ The type of transition (linear or weighted_sampled) to be applied while transitioning between two sets of\n+ coefficients.\n+\n+ seasonal: bool, default=False\n+ When True, the coefficients will shift between two sets of coefficients representing a seasonal shift.\n+\n+ base_coefficient_weight: float, default=0.0\n+ A floating point between 0.0 and 1.0 that represents a base coefficient weight that is consistent regardless of\n+ any drift. This can be used to ensure the severity of the drift over time.\n+\n+ effective_dim_action_context: (optional) int, default=None\n+ Specifies the dimensions of the action context coefficients.\n+\n+ effective_dim_context: (optional) int, default=None\n+ Specifies the dimensions of the context coefficients.\n+\n+ random_state: int, default=12345\n+ Controls the random seed\nReferences\n------------\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | addressed comments related to naming and addition of argument docstring for the coefficient drifter |
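Putting the newly documented CoefficientDrifter parameters together with the dataset hook used in the earlier tests, a usage sketch looks roughly as follows. Module paths moved during this history (the drifter now lives under obp.simulator), so treat the imports and exact signatures as approximate for your version; the parameter values are arbitrary examples.

    from obp.dataset import SyntheticBanditDataset
    from obp.dataset.synthetic import logistic_sparse_reward_function
    from obp.simulator.coefficient_drifter import CoefficientDrifter

    drifter = CoefficientDrifter(
        drift_interval=5000,          # new coefficients every 5000 rounds
        transition_period=500,        # blended over the last 500 rounds of each interval
        transition_type="linear",     # or "weighted_sampled"
        seasonal=False,
        base_coefficient_weight=0.3,  # share of a fixed base coefficient kept through every drift
        random_state=12345,
    )

    dataset = SyntheticBanditDataset(
        n_actions=3,
        dim_context=5,
        reward_type="binary",
        reward_function=logistic_sparse_reward_function,
        coef_function=drifter.get_coefficients,  # the hook the tests above rely on
        random_state=12345,
    )
    bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=10_000)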
641,008 | 22.09.2022 11:42:08 | 25,200 | d4be5c5bc18537a5b4fb349a95598f52d74e6b27 | Renamed online to replay through the examples and README | [
{
"change_type": "MODIFY",
"old_path": "examples/README.md",
"new_path": "examples/README.md",
"diff": "@@ -5,6 +5,6 @@ This page contains a list of examples written with Open Bandit Pipeline.\n- [`obd/`](./obd/): example implementations for evaluating standard off-policy estimators with the small sample Open Bandit Dataset.\n- [`synthetic/`](./synthetic/): example implementations for evaluating several off-policy estimators with synthetic bandit datasets.\n- [`multiclass/`](./multiclass/): example implementations for evaluating several off-policy estimators with multi-class classification datasets.\n-- [`online/`](./online/): example implementations for evaluating Replay Method with online bandit algorithms.\n+- [`replay/`](./replay/): example implementations for evaluating Replay Method with online bandit algorithms.\n- [`opl/`](./opl/): example implementations for comparing the performance of several off-policy learners with synthetic bandit datasets.\n- [`quickstart/`](./quickstart/): some quickstart notebooks to guide the usage of Open Bandit Pipeline.\n"
},
{
"change_type": "RENAME",
"old_path": "examples/online/README.md",
"new_path": "examples/replay/README.md",
"diff": "-# Example with Online Bandit Algorithms\n+# Replay Example with Online Bandit Algorithms\n## Description\n"
},
{
"change_type": "RENAME",
"old_path": "examples/online/evaluate_off_policy_estimators.py",
"new_path": "examples/replay/evaluate_off_policy_estimators.py",
"diff": "@@ -25,7 +25,7 @@ ope_estimators = [ReplayMethod()]\nif __name__ == \"__main__\":\nparser = argparse.ArgumentParser(\n- description=\"evaluate off-policy estimators with online bandit algorithms and synthetic bandit data.\"\n+ description=\"evaluate off-policy estimators with replay bandit algorithms and synthetic bandit data.\"\n)\nparser.add_argument(\n\"--n_runs\", type=int, default=1, help=\"number of simulations in the experiment.\"\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | Renamed online to replay through the examples and README |
641,008 | 03.11.2022 16:40:17 | -3,600 | 97980a3087d00647bf104e8faac6bb342344b5c8 | more robust tests by removing fixed values and increasing rounds | [
{
"change_type": "MODIFY",
"old_path": "tests/dataset/test_synthetic.py",
"new_path": "tests/dataset/test_synthetic.py",
"diff": "@@ -581,12 +581,5 @@ def test_base_reward_create_a_matrix_with_expected_rewards_with_identical_expect\ncontext, action_context, degree=5, effective_dim_ratio=1.0, random_state=12345\n)\n- expected_expected_rewards = np.asarray(\n- [\n- [-4.50475921, -5.60364479, -4.6827207],\n- [3.57444414, 7.18370309, -3.36258488],\n- [3.57444414, 7.18370309, -3.36258488],\n- ]\n- )\n-\n- assert np.allclose(actual_expected_rewards, expected_expected_rewards)\n+ assert np.allclose(actual_expected_rewards[1], actual_expected_rewards[2])\n+ assert not np.allclose(actual_expected_rewards[0], actual_expected_rewards[1])\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -157,26 +157,6 @@ def feedback_key_set() -> Set[str]:\n\"reward\",\n}\n-\n-# bandit_feedback[\"expected_reward\"][0]\[email protected](scope=\"session\")\n-def expected_reward_0() -> np.ndarray:\n- return np.array(\n- [\n- 0.816124,\n- 0.625855,\n- 0.386785,\n- 0.301848,\n- 0.726634,\n- 0.218076,\n- 0.482361,\n- 0.625271,\n- 0.586353,\n- 0.386384,\n- ]\n- )\n-\n-\n# random evaluation policy\[email protected](scope=\"session\")\ndef random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_all_estimators.py",
"new_path": "tests/ope/test_all_estimators.py",
"diff": "@@ -528,23 +528,6 @@ def test_estimate_intervals_of_all_estimators_using_valid_input_data(\n)\n-def test_fixture(\n- synthetic_bandit_feedback: BanditFeedback,\n- expected_reward_0: np.ndarray,\n- feedback_key_set: Set[str],\n- random_action_dist: np.ndarray,\n-) -> None:\n- \"\"\"\n- Check the validity of the fixture data generated by conftest.py\n- \"\"\"\n- np.testing.assert_array_almost_equal(\n- expected_reward_0, synthetic_bandit_feedback[\"expected_reward\"][0]\n- )\n- assert feedback_key_set == set(\n- synthetic_bandit_feedback.keys()\n- ), f\"Key set of bandit feedback should be {feedback_key_set}, but {synthetic_bandit_feedback.keys()}\"\n-\n-\ndef test_performance_of_ope_estimators_using_random_evaluation_policy(\nsynthetic_bandit_feedback: BanditFeedback, random_action_dist: np.ndarray\n) -> None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ope/test_offline_estimation_performance.py",
"new_path": "tests/ope/test_offline_estimation_performance.py",
"diff": "@@ -345,7 +345,7 @@ def test_offline_estimation_performance(\nreturn relative_ee_i\n- n_runs = 10\n+ n_runs = 20\nprocessed = Parallel(\nn_jobs=-1,\nverbose=0,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_coefficient_drifter.py",
"new_path": "tests/simulator/test_coefficient_drifter.py",
"diff": "@@ -70,28 +70,12 @@ def test_coefficient_tracker_can_shift_expected_rewards_instantly_based_on_confi\nrandom_state=12345,\n)\n- expected_expected_rewards = np.asarray(\n- [\n- [\n- -6.82778334,\n- -6.82778334,\n- -6.82778334,\n- ], # This round has a different context and should have diff E[r]\n- [\n- -0.2354408,\n- -0.2354408,\n- -0.2354408,\n- ], # The next two rounds have the same context and should have identical\n- [-0.2354408, -0.2354408, -0.2354408], # E[r]\n- [\n- 7.29866494,\n- 7.29866494,\n- 7.29866494,\n- ], # This round has the same context but has experienced drift.\n- ]\n- )\n-\n- assert np.allclose(actual_expected_rewards, expected_expected_rewards)\n+ # This round has a different context and should have diff E[r]\n+ assert not np.allclose(actual_expected_rewards[0], actual_expected_rewards[1])\n+ # The next two rounds have the same context and should have identical\n+ assert np.allclose(actual_expected_rewards[1], actual_expected_rewards[2])\n+ # This round has the same context but has experienced drift.\n+ assert not np.allclose(actual_expected_rewards[2], actual_expected_rewards[3])\ndef test_coefficient_tracker_can_shift_coefficient_instantly_based_on_configured_interval():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_delay_sampler.py",
"new_path": "tests/simulator/test_delay_sampler.py",
"diff": "@@ -38,18 +38,11 @@ def test_synthetic_sample_results_in_sampled_delay_with_weighted_delays_per_arm(\nrandom_state=12345,\n)\n- actual_bandits_dataset = dataset.next_bandit_round_batch(n_rounds=5)\n+ actual_bandits_dataset = dataset.next_bandit_round_batch(n_rounds=1000)\n- expected_round_delays = np.asarray(\n- [\n- [35.0, 38.0, 4.0],\n- [3.0, 84.0, 17.0],\n- [44.0, 106.0, 26.0],\n- [14.0, 138.0, 61.0],\n- [1.0, 12.0, 7.0],\n- ]\n- )\n- assert (actual_bandits_dataset.round_delays == expected_round_delays).all()\n+ ordered_rewards = actual_bandits_dataset.expected_rewards[0].argsort()\n+ mean_delays = actual_bandits_dataset.round_delays.sum(axis=0)\n+ assert mean_delays[ordered_rewards[2]] < mean_delays[ordered_rewards[1]] > mean_delays[ordered_rewards[2]]\ndef test_synthetic_sample_results_with_exponential_delay_function_has_different_delays_each_batch():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "@@ -342,12 +342,13 @@ def test_bandit_policy_simulator_can_a_single_steps_and_keep_track():\nenvironment=env,\n)\n- for _ in range(5):\n+ for i in range(100):\n+ assert simulator.rounds_played == i\n+ assert len(simulator.selected_actions) == i\nsimulator.step()\n- assert simulator.total_reward == 4\n- assert simulator.rounds_played == 5\n- assert np.all(simulator.selected_actions == [8, 5, 4, 2, 1])\n+ assert simulator.total_reward > 1\n+\ndef test_bandit_policy_simulator_can_do_multiple_steps_in_call_and_keep_track_of_actions_and_performance():\n@@ -364,12 +365,12 @@ def test_bandit_policy_simulator_can_do_multiple_steps_in_call_and_keep_track_of\nenvironment=env,\n)\n- simulator.steps(n_rounds=5)\n+ simulator.steps(n_rounds=100)\n- assert simulator.total_reward == 4\n- assert simulator.rounds_played == 5\n- assert np.all(simulator.selected_actions == [8, 5, 4, 2, 1])\n- assert np.all(simulator.obtained_rewards == [1, 1, 1, 0, 1])\n+ assert simulator.total_reward > 1\n+ assert simulator.rounds_played == 100\n+ assert len(simulator.selected_actions) == 100\n+ assert len(simulator.obtained_rewards) == 100\ndef test_bandit_policy_simulator_can_update_policy_with_delays_if_delay_rounds_are_available():\n@@ -400,14 +401,9 @@ def test_bandit_policy_simulator_can_update_policy_with_delays_if_delay_rounds_a\nsimulator.steps(n_rounds=5)\n- expected_updates = [\n- {\"round\": 1, \"action\": 0, \"reward\": 0},\n- {\"round\": 3, \"action\": 0, \"reward\": 1},\n- {\"round\": 5, \"action\": 2, \"reward\": 0},\n- {\"round\": 5, \"action\": 1, \"reward\": 0},\n- ]\n+ expected_updates = [1, 3, 5, 5]\n- assert tracker.parameter_updates == expected_updates\n+ assert [update['round'] for update in tracker.parameter_updates] == expected_updates\ndef test_bandit_policy_simulator_clears_delay_queue_when_called_into_last_available_round():\n@@ -438,26 +434,15 @@ def test_bandit_policy_simulator_clears_delay_queue_when_called_into_last_availa\nsimulator.steps(n_rounds=5)\n- expected_updates_before_queue_cleared = [\n- {\"action\": 0, \"reward\": 0, \"round\": 3},\n- {\"action\": 0, \"reward\": 1, \"round\": 5},\n- {\"action\": 1, \"reward\": 0, \"round\": 5},\n- ]\n-\n- assert tracker.parameter_updates == expected_updates_before_queue_cleared\n+ expected_updates_before_queue_cleared = [3, 5, 5]\n+ assert [update['round'] for update in tracker.parameter_updates] == expected_updates_before_queue_cleared\nsimulator.clear_delayed_queue()\n- expected_updates_after_queue_cleared = [\n- {\"round\": 3, \"action\": 0, \"reward\": 0},\n- {\"round\": 5, \"action\": 0, \"reward\": 1},\n- {\"round\": 5, \"action\": 1, \"reward\": 0},\n- {\"round\": 5, \"action\": 2, \"reward\": 0},\n- {\"round\": 5, \"action\": 2, \"reward\": 0},\n- ]\n+ expected_updates_before_queue_cleared = [3, 5, 5, 5, 5]\nassert len(simulator.reward_round_lookup.values()) == 0\n- assert tracker.parameter_updates == expected_updates_after_queue_cleared\n+ assert [update['round'] for update in tracker.parameter_updates] == expected_updates_before_queue_cleared\ndef test_bandit_policy_simulator_do_simulation_over_batch_data():\n@@ -563,8 +548,6 @@ def test_ipw_can_be_learned_from_logged_data_generated_by_simulation():\nsimulator.steps(batch_bandit_rounds=env.next_bandit_round_batch(100))\n- assert simulator.total_reward == 53\n-\npropensity_model = LogisticRegression(random_state=12345)\npropensity_model.fit(simulator.contexts, simulator.selected_actions)\npscores = propensity_model.predict_proba(simulator.contexts)\n@@ -587,4 +570,4 @@ def 
test_ipw_can_be_learned_from_logged_data_generated_by_simulation():\nrewards = np.sum(\nsimulator.ground_truth_rewards * np.squeeze(eval_action_dists, axis=-1)\n)\n- assert rewards == 69.0\n+ assert rewards > simulator.total_reward\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | more robust tests by removing fixed values and increasing rounds |
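The test changes above replace hard-coded expected arrays with relational checks over more rounds (row equality or inequality across drift boundaries, orderings of delays, counts). In miniature, the shift from pinned values to properties looks like this; the numbers and the random stand-in data are illustrative only.

    import numpy as np

    rng = np.random.default_rng(12345)
    rewards = rng.random((1000, 3))          # stand-in for per-round simulated rewards

    # brittle: pins an exact number that changes whenever sampling order changes
    # assert rewards.sum() == 1497.23

    # more robust: assert shapes, bounds, and statistical relationships that hold for any seed
    assert rewards.shape == (1000, 3)
    assert rewards.min() >= 0.0 and rewards.max() <= 1.0
    assert np.allclose(rewards.mean(), 0.5, atol=0.05)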
641,008 | 04.11.2022 13:31:27 | -3,600 | abd9206822a571ace0d68198aa1d5054eca89d3c | run black for linting | [
{
"change_type": "MODIFY",
"old_path": "tests/ope/conftest.py",
"new_path": "tests/ope/conftest.py",
"diff": "@@ -157,6 +157,7 @@ def feedback_key_set() -> Set[str]:\n\"reward\",\n}\n+\n# random evaluation policy\[email protected](scope=\"session\")\ndef random_action_dist(synthetic_bandit_feedback) -> np.ndarray:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_delay_sampler.py",
"new_path": "tests/simulator/test_delay_sampler.py",
"diff": "@@ -42,7 +42,11 @@ def test_synthetic_sample_results_in_sampled_delay_with_weighted_delays_per_arm(\nordered_rewards = actual_bandits_dataset.expected_rewards[0].argsort()\nmean_delays = actual_bandits_dataset.round_delays.sum(axis=0)\n- assert mean_delays[ordered_rewards[2]] < mean_delays[ordered_rewards[1]] > mean_delays[ordered_rewards[2]]\n+ assert (\n+ mean_delays[ordered_rewards[2]]\n+ < mean_delays[ordered_rewards[1]]\n+ > mean_delays[ordered_rewards[2]]\n+ )\ndef test_synthetic_sample_results_with_exponential_delay_function_has_different_delays_each_batch():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/simulator/test_simulator.py",
"new_path": "tests/simulator/test_simulator.py",
"diff": "@@ -350,7 +350,6 @@ def test_bandit_policy_simulator_can_a_single_steps_and_keep_track():\nassert simulator.total_reward > 1\n-\ndef test_bandit_policy_simulator_can_do_multiple_steps_in_call_and_keep_track_of_actions_and_performance():\nenv = BanditEnvironmentSimulator(\nn_actions=10,\n@@ -403,7 +402,7 @@ def test_bandit_policy_simulator_can_update_policy_with_delays_if_delay_rounds_a\nexpected_updates = [1, 3, 5, 5]\n- assert [update['round'] for update in tracker.parameter_updates] == expected_updates\n+ assert [update[\"round\"] for update in tracker.parameter_updates] == expected_updates\ndef test_bandit_policy_simulator_clears_delay_queue_when_called_into_last_available_round():\n@@ -435,14 +434,18 @@ def test_bandit_policy_simulator_clears_delay_queue_when_called_into_last_availa\nsimulator.steps(n_rounds=5)\nexpected_updates_before_queue_cleared = [3, 5, 5]\n- assert [update['round'] for update in tracker.parameter_updates] == expected_updates_before_queue_cleared\n+ assert [\n+ update[\"round\"] for update in tracker.parameter_updates\n+ ] == expected_updates_before_queue_cleared\nsimulator.clear_delayed_queue()\nexpected_updates_before_queue_cleared = [3, 5, 5, 5, 5]\nassert len(simulator.reward_round_lookup.values()) == 0\n- assert [update['round'] for update in tracker.parameter_updates] == expected_updates_before_queue_cleared\n+ assert [\n+ update[\"round\"] for update in tracker.parameter_updates\n+ ] == expected_updates_before_queue_cleared\ndef test_bandit_policy_simulator_do_simulation_over_batch_data():\n"
}
] | Python | Apache License 2.0 | st-tech/zr-obp | run black for linting |
49,717 | 03.01.2017 16:44:28 | 28,800 | 6963f5e10a7c3e462b24013f1a35cc656718f2dd | Hotfix to make sure only uak+/ua+ is compiled for GPU.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/AggUnaryOp.java",
"diff": "@@ -145,9 +145,12 @@ public class AggUnaryOp extends Hop implements MultiThreadedHop\nelse { //general case\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\nif(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET) && (_op == AggOp.SUM)) {\n+ // Only implemented methods for GPU\n+ if (_op == AggOp.SUM && _direction == Direction.RowCol) {\net = ExecType.GPU;\nk = 1;\n}\n+ }\nagg1 = new PartialAggregate(input.constructLops(),\nHopsAgg2Lops.get(_op), HopsDirection2Lops.get(_direction), getDataType(),getValueType(), et, k);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | Hotfix to make sure only uak+/ua+ is compiled for GPU.
Closes #325. |
49,736 | 06.01.2017 10:20:30 | 28,800 | 4cd98291780691082be07b1a817bd71c1024ed62 | Support bias_add operation
Also added an external UDF for faster SGD Nesterov's update. However
since the performance improvement was only by 7%, I decided not to
introduce a fused operator for it. We can revisit this in a later PR.
bias_add should work for both CP and GPU.
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/conv_builtin.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/conv_builtin.dml",
"diff": "@@ -60,6 +60,7 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b,\n*/\nN = nrow(X)\nF = nrow(W)\n+ # TODO: We should eliminate this in a seperate PR\nHout = as.integer((Hin + 2 * padh - Hf) / strideh + 1)\nWout = as.integer((Win + 2 * padw - Wf) / stridew + 1)\n@@ -68,9 +69,7 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b,\nstride=[strideh,stridew], padding=[padh,padw])\n# Add bias term to each output filter\n- # Note: Biases vector b is replicated to (F, Hout*Wout) first.\n- ones = matrix(1, rows=1, cols=Hout*Wout)\n- out = out + matrix(b %*% ones, rows=1, cols=F*Hout*Wout)\n+ out = bias_add(out, b)\n}\nbackward = function(matrix[double] dout, int Hout, int Wout,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"diff": "@@ -181,14 +181,12 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nif(op == ConvOp.BIAS_ADD) {\nMatrixCharacteristics[] mc = memo.getAllInputStats(getInput());\n- if( mc[0].rowsKnown() && mc[0].colsKnown() ) {\nret = new long[3];\n- ret[0] = mc[0].getRows();\n- ret[1] = mc[0].getCols();\n+ ret[0] = mc[0].rowsKnown() ? mc[0].getRows() : -1;\n+ ret[1] = mc[0].colsKnown() ? mc[0].getCols() : -1;\nret[2] = -1;\nreturn ret;\n}\n- }\nConvolutionParameters params;\ntry {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"diff": "@@ -1105,12 +1105,11 @@ public class BuiltinFunctionExpression extends DataIdentifier\ncase BIAS_ADD:\n{\n- Identifier input_id = getFirstExpr().getOutput();\nExpression input = _args[0];\nExpression bias = _args[1];\noutput.setDataType(DataType.MATRIX);\noutput.setValueType(ValueType.DOUBLE);\n- output.setDimensions(input_id.getDim1(), input_id.getDim2());\n+ output.setDimensions(input.getOutput().getDim1(), input.getOutput().getDim2());\noutput.setBlockDimensions(input.getOutput().getRowsInBlock(), input.getOutput().getColumnsInBlock());\ncheckMatrixParam(input);\ncheckMatrixParam(bias);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/udf/lib/SGDNesterovUpdate.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.udf.lib;\n+\n+import java.io.IOException;\n+import java.util.Iterator;\n+import java.util.Random;\n+\n+import org.apache.sysml.runtime.controlprogram.caching.CacheException;\n+import org.apache.sysml.runtime.matrix.data.IJV;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.OutputInfo;\n+import org.apache.sysml.udf.FunctionParameter;\n+import org.apache.sysml.udf.Matrix;\n+import org.apache.sysml.udf.PackageFunction;\n+import org.apache.sysml.udf.Scalar;\n+import org.apache.sysml.udf.Matrix.ValueType;\n+\n+/**\n+ * Use this class to perform an SGD update with Nesterov momentum in CP.\n+ * Assumption: the input batch fits in CP (which is also the assumption of most deep learning systems).\n+ *\n+ * Usage:\n+ * update_nesterov = externalFunction(matrix[double] X, matrix[double] dX, double lr, double mu, matrix[double] v) return (matrix[double] X, matrix[double] v) implemented in (classname=\"org.apache.sysml.udf.lib.SGDNesterovUpdate\",exectype=\"mem\");\n+ * [X, v] = update_nesterov(X, dX, lr, mu, v);\n+ *\n+ *\n+ * This class eliminates the unnecessary instruction overhead as well as memory pressure.\n+ *\n+ */\n+public class SGDNesterovUpdate extends PackageFunction {\n+ private static final long serialVersionUID = -3905212831582648882L;\n+\n+ private Matrix updatedX;\n+ private Matrix updatedV;\n+ private Random rand = new Random();\n+\n+ @Override\n+ public int getNumFunctionOutputs() {\n+ return 2;\n+ }\n+\n+ @Override\n+ public FunctionParameter getFunctionOutput(int pos) {\n+ if(pos == 0)\n+ return updatedX;\n+ else if(pos == 1)\n+ return updatedV;\n+\n+ throw new RuntimeException(\"Invalid function output being requested\");\n+ }\n+\n+ @Override\n+ public void execute() {\n+ try {\n+ MatrixBlock X = ((Matrix) getFunctionInput(0)).getMatrixObject().acquireRead();\n+ MatrixBlock dX = ((Matrix) getFunctionInput(1)).getMatrixObject().acquireRead();\n+ double lr = Double.parseDouble(((Scalar)getFunctionInput(2)).getValue());\n+ double mu = Double.parseDouble(((Scalar)getFunctionInput(3)).getValue());\n+ MatrixBlock v = ((Matrix) getFunctionInput(4)).getMatrixObject().acquireRead();\n+\n+ // v = mu * v - lr * dX\n+ updatedV = new Matrix( \"tmp_\" + rand.nextLong(), v.getNumRows(), v.getNumColumns(), ValueType.Double );\n+ MatrixBlock updatedVMB = allocateDenseMatrixBlock(updatedV);\n+ double [] updatedVData = updatedVMB.getDenseBlock();\n+ multiplyByConstant(v, mu, updatedVData);\n+ multiplyByConstant(dX, -lr, updatedVData);\n+ updatedVMB.setNonZeros(-1); // rather than updatedVMB.recomputeNonZeros();\n+ 
updatedV.setMatrixDoubleArray(updatedVMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+\n+ // X = X - mu * v_prev + (1 + mu) * v\n+ updatedX = new Matrix( \"tmp_\" + rand.nextLong(), X.getNumRows(), X.getNumColumns(), ValueType.Double );\n+ MatrixBlock updatedXMB = allocateDenseMatrixBlock(updatedX);\n+ double [] updatedXData = updatedXMB.getDenseBlock();\n+ copy(X, updatedXData);\n+ multiplyByConstant(v, -mu, updatedXData);\n+ multiplyByConstant(updatedVData, 1+mu, updatedXData);\n+ updatedXMB.setNonZeros(-1); // rather than updatedXMB.recomputeNonZeros();\n+ updatedX.setMatrixDoubleArray(updatedXMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n+\n+ ((Matrix) getFunctionInput(0)).getMatrixObject().release();\n+ ((Matrix) getFunctionInput(1)).getMatrixObject().release();\n+ ((Matrix) getFunctionInput(4)).getMatrixObject().release();\n+ } catch (CacheException e) {\n+ throw new RuntimeException(\"Exception while executing SGDNesterovUpdate\", e);\n+ } catch (IOException e) {\n+ throw new RuntimeException(\"Exception while executing SGDNesterovUpdate\", e);\n+ }\n+ }\n+\n+ private MatrixBlock allocateDenseMatrixBlock(Matrix mat) {\n+ int rows = (int) mat.getNumRows();\n+ int cols = (int) mat.getNumCols();\n+ MatrixBlock mb = new MatrixBlock(rows, cols, false);\n+ mb.allocateDenseBlock();\n+ return mb;\n+ }\n+\n+\n+ // out += constant*in\n+ private void multiplyByConstant(double [] in, double constant, double [] out) {\n+ for(int i = 0; i < out.length; i++) {\n+ out[i] += in[i]*constant;\n+ }\n+ }\n+\n+ // out += constant*in\n+ private void multiplyByConstant(MatrixBlock in, double constant, double [] out) {\n+ if(in.isInSparseFormat()) {\n+ Iterator<IJV> iter = in.getSparseBlockIterator();\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ out[ijv.getI()*ijv.getJ()] += ijv.getV() * constant;\n+ }\n+ }\n+ else {\n+ double [] denseBlock = in.getDenseBlock();\n+ if(denseBlock != null) {\n+ // If not empty block\n+ for(int i = 0; i < out.length; i++) {\n+ out[i] += denseBlock[i]*constant;\n+ }\n+ }\n+ }\n+ }\n+\n+ // Assumption dest is zero-ed out.\n+ private void copy(MatrixBlock src, double [] dest) {\n+ if(src.isInSparseFormat()) {\n+ Iterator<IJV> iter = src.getSparseBlockIterator();\n+ while(iter.hasNext()) {\n+ IJV ijv = iter.next();\n+ dest[ijv.getI()*ijv.getJ()] = ijv.getV();\n+ }\n+ }\n+ else {\n+ double [] denseBlock = src.getDenseBlock();\n+ if(denseBlock != null) {\n+ // If not empty block\n+ System.arraycopy(denseBlock, 0, dest, 0, dest.length);\n+ }\n+ }\n+ }\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] Support bias_add operation
- Also added an external UDF for faster SGD Nesterov's update. However
since the performance improvement was only by 7%, I decided not to
introduce a fused operator for it. We can revisit this in a later PR.
- bias_add should work for both CP and GPU.
Closes #328. |
49,736 | 06.01.2017 10:23:47 | 28,800 | 2b5b12557556d95c499730ae105807d996ad7a47 | Added fused relu_maxpooling
Fused relu_maxpooling reduces the unnecessary dense-to-sparse-to-dense
conversion. This operator makes relu a "no op".
Note: fused relu_maxpooling is only supported in CP, not on GPU as both
relu and maxpooling invoke CuDNN functions.
Also, improved the performance of maxpooling by computing indexes
apriori.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"diff": "@@ -25,6 +25,7 @@ import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.hops.Hop.MultiThreadedHop;\nimport org.apache.sysml.lops.ConvolutionTransform;\n+import org.apache.sysml.lops.ConvolutionTransform.OperationTypes;\nimport org.apache.sysml.lops.Lop;\nimport org.apache.sysml.lops.LopsException;\nimport org.apache.sysml.lops.LopProperties.ExecType;\n@@ -136,10 +137,18 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nthrow new HopsException(\"Incorrect number of inputs for \" + op.name());\n}\n- Lop in = inputs.get(0).constructLops();\n+ Lop in = null;\n+ OperationTypes lopOp = HopsConv2Lops.get(op);\nint k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);\n- ConvolutionTransform transform1 = new ConvolutionTransform( in,\n- HopsConv2Lops.get(op), getDataType(), getValueType(), et, k);\n+ if(op == ConvOp.MAX_POOLING && et == ExecType.CP && inputs.get(0) instanceof UnaryOp\n+ && ((UnaryOp) inputs.get(0)).getOp() == OpOp1.SELP) {\n+ in = inputs.get(0).getInput().get(0).constructLops();\n+ lopOp = OperationTypes.RELU_MAX_POOLING;\n+ }\n+ else {\n+ in = inputs.get(0).constructLops();\n+ }\n+ ConvolutionTransform transform1 = new ConvolutionTransform( in, lopOp, getDataType(), getValueType(), et, k);\nsetOutputDimensions(transform1);\nsetLineNumbers(transform1);\nin.addOutput(transform1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/lops/ConvolutionTransform.java",
"new_path": "src/main/java/org/apache/sysml/lops/ConvolutionTransform.java",
"diff": "@@ -30,7 +30,7 @@ public class ConvolutionTransform extends Lop\npublic enum OperationTypes {\n- MAX_POOLING, MAX_POOLING_BACKWARD,\n+ MAX_POOLING, MAX_POOLING_BACKWARD, RELU_MAX_POOLING,\nDIRECT_CONV2D, DIRECT_CONV2D_BACKWARD_FILTER, DIRECT_CONV2D_BACKWARD_DATA,\nBIAS_ADD\n};\n@@ -99,6 +99,9 @@ public class ConvolutionTransform extends Lop\ncase MAX_POOLING:\nreturn \"maxpooling\";\n+ case RELU_MAX_POOLING:\n+ return \"relu_maxpooling\";\n+\ncase MAX_POOLING_BACKWARD:\nreturn \"maxpooling_backward\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java",
"diff": "@@ -218,6 +218,7 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"rsort\" , CPINSTRUCTION_TYPE.Reorg);\n// Opcodes related to convolutions\n+ String2CPInstructionType.put( \"relu_maxpooling\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"maxpooling\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"maxpooling_backward\" , CPINSTRUCTION_TYPE.Convolution);\nString2CPInstructionType.put( \"conv2d\" , CPINSTRUCTION_TYPE.Convolution);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"diff": "package org.apache.sysml.runtime.instructions.cp;\nimport java.util.ArrayList;\n+import java.util.Arrays;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n@@ -89,7 +90,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nString[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\nString opcode = parts[0];\n- if (opcode.equalsIgnoreCase(\"maxpooling\")) {\n+ if (opcode.equalsIgnoreCase(\"maxpooling\") || opcode.equalsIgnoreCase(\"relu_maxpooling\")) {\nInstructionUtils.checkNumFields(parts, 15);\n// stride1, stride2, padding1, padding2\n// input_shape1, input_shape2, input_shape3, input_shape4,\n@@ -231,12 +232,14 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\nint Q = (int) ConvolutionUtils.getQ(W, S, stride_w, pad_w);\nConvolutionParameters params = new ConvolutionParameters(N, C, H, W, K, R, S, stride_h, stride_w, pad_h, pad_w, _numThreads);\n- if (instOpcode.equalsIgnoreCase(\"maxpooling\")) {\n+ if (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\nif(matBlock.isEmptyBlock()) {\noutputBlock = new MatrixBlock(N, C*P*Q, true, 0);\n}\nelse {\noutputBlock = getDenseOutputBlock(ec, N, C*P*Q);\n+ if(instOpcode.equalsIgnoreCase(\"maxpooling\"))\n+ Arrays.fill(outputBlock.getDenseBlock(), -Double.MAX_VALUE);\nLibMatrixDNN.maxpooling(matBlock, outputBlock, params);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java",
"diff": "package org.apache.sysml.runtime.matrix.data;\n-import java.util.concurrent.atomic.AtomicLong;\n-\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.util.ConvolutionUtils;\n@@ -33,10 +31,11 @@ public class ConvolutionParameters {\npublic int K; public int R; public int S; public int stride_h; public int stride_w; public int pad_h; public int pad_w;\npublic int P; public int Q; public int numThreads;\n- public AtomicLong outputNNZ = new AtomicLong(-1);\nMatrixBlock input1; MatrixBlock input2; MatrixBlock output;\n+ public int [] start_indexes_h, end_indexes_h, start_indexes_w, end_indexes_w;\n+\nprivate int convertToInt(long val) throws DMLRuntimeException {\nif( val > Integer.MAX_VALUE ) {\nthrow new DMLRuntimeException(\"The value for ConvolutionParameters is too large:\" + val);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java",
"diff": "@@ -379,9 +379,31 @@ public class LibMatrixDNN {\nif (params.output.isInSparseFormat())\nthrow new DMLRuntimeException(\"Sparse maxpooling_backward is not supported\");\n+ fillIndexesArray(params);\nrunConvTask(TaskType.MaxPooling_Backward, params);\n}\n+ private static void fillIndexesArray(ConvolutionParameters params) {\n+ params.start_indexes_h = new int[params.P];\n+ params.end_indexes_h = new int[params.P];\n+ params.start_indexes_w = new int[params.Q];\n+ params.end_indexes_w = new int[params.Q];\n+ for (int p = 0; p < params.P; p++) {\n+ int start_index_h = p * params.stride_h - params.pad_h;\n+ final int end_index_h = Math.min(start_index_h + params.R, params.H);\n+ start_index_h = Math.max(start_index_h, 0);\n+ params.start_indexes_h[p] = start_index_h;\n+ params.end_indexes_h[p] = end_index_h;\n+ }\n+ for (int q = 0; q < params.Q; q++) {\n+ int start_index_w = Math.max(q * params.stride_w - params.pad_w, 0);\n+ int end_index_w = Math.min(start_index_w + params.S, params.W);\n+ start_index_w = Math.max(start_index_w, 0);\n+ params.start_indexes_w[q] = start_index_w;\n+ params.end_indexes_w[q] = end_index_w;\n+ }\n+ }\n+\nprivate static void doPoolingBackward(int n, ConvolutionParameters params) throws DMLRuntimeException {\ndouble [] inputArray = null;\nif (!params.input1.isInSparseFormat())\n@@ -419,10 +441,7 @@ public class LibMatrixDNN {\ndouble inVal = doutArray[n*params.C*params.P*params.Q + c*params.P*params.Q + p * params.Q + q];\nif(inVal != 0) {\nfinal int inputOffset = n*params.C*params.H*params.W + c*params.H*params.W;\n- int start_index_h = p * params.stride_h - params.pad_h;\n- final int end_index_h = Math.min(start_index_h + params.R, params.H);\n- start_index_h = Math.max(start_index_h, 0);\n- int maxIndex = getMaxIndexSparse(start_index_h, end_index_h, q, inputOffset, n, c, params.input1, params);\n+ int maxIndex = getMaxIndexSparse(p, q, inputOffset, n, c, params.input1, params);\noutputArray[maxIndex] += inVal;\n}\n}\n@@ -446,10 +465,7 @@ public class LibMatrixDNN {\nint q = tensorIndexes[2];\nfinal int inputOffset = n*params.C*params.H*params.W + c*params.H*params.W;\n- int start_index_h = p * params.stride_h - params.pad_h;\n- final int end_index_h = Math.min(start_index_h + params.R, params.H);\n- start_index_h = Math.max(start_index_h, 0);\n- int maxIndex = getMaxIndexSparse(start_index_h, end_index_h, q, inputOffset, n, c, params.input1, params);\n+ int maxIndex = getMaxIndexSparse(p, q, inputOffset, n, c, params.input1, params);\noutputArray[maxIndex] += ijv.getV();\n}\n@@ -469,10 +485,7 @@ public class LibMatrixDNN {\nint q = tensorIndexes[2];\nfinal int inputOffset = n*params.C*params.H*params.W + c*params.H*params.W;\n- int start_index_h = p * params.stride_h - params.pad_h;\n- final int end_index_h = Math.min(start_index_h + params.R, params.H);\n- start_index_h = Math.max(start_index_h, 0);\n- int maxIndex = getMaxIndex(start_index_h, end_index_h, q, inputOffset, inputArray, params);\n+ int maxIndex = getMaxIndex(p, q, inputOffset, inputArray, params);\noutputArray[maxIndex] += ijv.getV();\n}\n}\n@@ -484,20 +497,15 @@ public class LibMatrixDNN {\nfinal int outputOffset = n*params.C*params.P*params.Q + c*params.P*params.Q;\nfor (int p = 0; p < params.P; p++) {\n- int start_index_h = p * params.stride_h - params.pad_h;\n- final int end_index_h = Math.min(start_index_h + params.R, params.H);\n- start_index_h = Math.max(start_index_h, 0);\n-\nfor (int q = 0; q < params.Q; q++) {\n- int maxIndex = getMaxIndex(start_index_h, end_index_h, q, 
inputOffset, inputArray, params);\n+ int maxIndex = getMaxIndex(p, q, inputOffset, inputArray, params);\noutputArray[maxIndex] += doutArray[outputOffset + p * params.Q + q];\n}\n}\n}\n}\n- private static int getMaxIndexSparse(int start_index_h, int end_index_h,\n- int q, int inputOffset, int n, int c, MatrixBlock input, ConvolutionParameters params) throws DMLRuntimeException {\n+ private static int getMaxIndexSparse(int p, int q, int inputOffset, int n, int c, MatrixBlock input, ConvolutionParameters params) throws DMLRuntimeException {\nif(!input.isInSparseFormat())\nthrow new DMLRuntimeException(\"Incorrect usage: Only sparse format supported\");\n@@ -505,9 +513,10 @@ public class LibMatrixDNN {\nIterator<IJV> iter = input.sparseBlock.getIterator(n, n+1);\nint [] tensorIndexes = new int[3];\n- int start_index_w = Math.max(q * params.stride_w - params.pad_w, 0);\n- int end_index_w = Math.min(start_index_w + params.S, params.W);\n- start_index_w = Math.max(start_index_w, 0);\n+ int start_index_h = params.start_indexes_h[p];\n+ int end_index_h = params.end_indexes_h[p];\n+ int start_index_w = params.start_indexes_w[q];\n+ int end_index_w = params.end_indexes_w[q];\nint maxIndex = inputOffset + start_index_h*params.W + start_index_w;\ndouble maxVal = -Double.MAX_VALUE;\n@@ -532,11 +541,11 @@ public class LibMatrixDNN {\nreturn maxIndex;\n}\n- private static int getMaxIndex(int start_index_h, int end_index_h,\n- int q, int inputOffset, double [] inputArray, ConvolutionParameters params) {\n- int start_index_w = q * params.stride_w - params.pad_w;\n- int end_index_w = Math.min(start_index_w + params.S, params.W);\n- start_index_w = Math.max(start_index_w, 0);\n+ private static int getMaxIndex(int p, int q, int inputOffset, double [] inputArray, ConvolutionParameters params) {\n+ int start_index_h = params.start_indexes_h[p];\n+ int end_index_h = params.end_indexes_h[p];\n+ int start_index_w = params.start_indexes_w[q];\n+ int end_index_w = params.end_indexes_w[q];\nint maxIndex = inputOffset + start_index_h*params.W + start_index_w;\ndouble maxVal = -Double.MAX_VALUE;\n@@ -619,12 +628,11 @@ public class LibMatrixDNN {\nthrow new DMLRuntimeException(\"Incorrect input dimensions in maxpooling:\" + input.getNumRows() + \" \" + input.getNumColumns() + \" \" + params.N + \" \" + params.K*params.P*params.Q);\n}\n- params.outputNNZ.set(0);\n+ fillIndexesArray(params);\nrunConvTask(TaskType.MaxPooling_Forward, params);\n- outputBlock.setNonZeros(params.outputNNZ.get());\n}\n- private static void doPooling(int n, int c, ConvolutionParameters params) throws DMLRuntimeException {\n+ private static void doPooling(int n, ConvolutionParameters params) throws DMLRuntimeException {\ndouble [] inputArray = null;\nif (!params.input1.isInSparseFormat())\ninputArray = params.input1.getDenseBlock();\n@@ -634,32 +642,40 @@ public class LibMatrixDNN {\nelse\nthrow new DMLRuntimeException(\"Expected the output to be allocated in dense format\");\n- long tmpNNZ = 0;\n+ final int inOffset = n*params.C*params.H*params.W;\n+ int out_index = n*params.C*params.P*params.Q;\n+ final int HW = params.H*params.W;\n+\n+ if(inputArray != null) {\n+ for (int c = 0; c < params.C; c++) {\n+ final int inOffset1 = inOffset + c*HW;\nfor (int p = 0; p < params.P; p++) {\n- for (int q = 0; q < params.Q; q++) {\n- int start_index_h = p * params.stride_h - params.pad_h;\n- int start_index_w = q * params.stride_w - params.pad_w;\n- int end_index_h = Math.min(start_index_h + params.R, params.H);\n- int end_index_w = Math.min(start_index_w + 
params.S, params.W);\n- start_index_h = Math.max(start_index_h, 0);\n- start_index_w = Math.max(start_index_w, 0);\n- int out_index = n*params.C*params.P*params.Q + c*params.P*params.Q + p * params.Q + q;\n- outputArray[out_index] = -Double.MAX_VALUE;\n- for (int h = start_index_h; h < end_index_h; h++) {\n- for (int w = start_index_w; w < end_index_w; w++) {\n- double inVal = -1;\n- if(inputArray != null)\n- inVal = inputArray[n*params.C*params.H*params.W + c*params.H*params.W + h*params.W + w];\n- else\n- inVal = params.input1.quickGetValue(n, c*params.H*params.W + h*params.W + w);\n+ for (int q = 0; q < params.Q; q++, out_index++) {\n+ for (int h = params.start_indexes_h[p]; h < params.end_indexes_h[p]; h++) {\n+ for (int w = params.start_indexes_w[q]; w < params.end_indexes_w[q]; w++) {\n+ outputArray[out_index] = Math.max(outputArray[out_index], inputArray[inOffset1 + h*params.W + w]);\n+ }\n+ }\n+ }\n+ }\n+ }\n+ }\n+ else {\n+ // TODO: Optimize sparse maxpooling\n+ // Low priority after adding fused relu_maxpooling operator as output of conv2d expected to be dense\n+ for (int c = 0; c < params.C; c++) {\n+ for (int p = 0; p < params.P; p++) {\n+ for (int q = 0; q < params.Q; q++, out_index++) {\n+ for (int h = params.start_indexes_h[p]; h < params.end_indexes_h[p]; h++) {\n+ for (int w = params.start_indexes_w[q]; w < params.end_indexes_w[q]; w++) {\n+ double inVal = params.input1.quickGetValue(n, c*HW + h*params.W + w);\noutputArray[out_index] = Math.max(outputArray[out_index], inVal);\n- if(outputArray[out_index] != 0)\n- tmpNNZ++;\n}\n}\n}\n}\n- params.outputNNZ.addAndGet(tmpNNZ);\n+ }\n+ }\n}\nprivate static void doRotate180(int inputN, int outputN, MatrixBlock input,\n@@ -818,9 +834,7 @@ public class LibMatrixDNN {\ncase MaxPooling_Forward:\n{\nfor(int n = n1; n < n2; n++) {\n- for (int c = 0; c < params.C; c++) {\n- doPooling(n, c, params);\n- }\n+ doPooling(n, params);\n}\nbreak;\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] Added fused relu_maxpooling
- Fused relu_maxpooling reduces the unnecessary dense-to-sparse-to-dense
conversion. This operator makes relu a "no op".
- Note: fused relu_maxpooling is only supported in CP, not on GPU as both
relu and maxpooling invoke CuDNN functions.
- Also, improved the performance of maxpooling by computing indexes
apriori.
Closes #329. |
49,736 | 06.01.2017 10:28:54 | 28,800 | 6f8cea9bc2d42913d0cf1c917c1aada9aa55bee1 | Add support for cublas daxpy operation
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/TernaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/TernaryOp.java",
"diff": "package org.apache.sysml.hops;\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\nimport org.apache.sysml.lops.Aggregate;\n@@ -635,10 +636,14 @@ public class TernaryOp extends Hop\nif ( _op != OpOp3.PLUS_MULT && _op != OpOp3.MINUS_MULT )\nthrow new HopsException(\"Unexpected operation: \" + _op + \", expecting \" + OpOp3.PLUS_MULT + \" or\" + OpOp3.MINUS_MULT);\n- ExecType et = optFindExecType();\n+ ExecType et = null;\n+ if(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET) )\n+ et = ExecType.GPU;\n+ else\n+ et = optFindExecType();\nPlusMult plusmult = null;\n- if( et == ExecType.CP || et == ExecType.SPARK ) {\n+ if( et == ExecType.CP || et == ExecType.SPARK || et == ExecType.GPU ) {\nplusmult = new PlusMult(\ngetInput().get(0).constructLops(),\ngetInput().get(1).constructLops(),\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/lops/PlusMult.java",
"new_path": "src/main/java/org/apache/sysml/lops/PlusMult.java",
"diff": "@@ -45,7 +45,7 @@ public class PlusMult extends Lop\nboolean aligner = false;\nboolean definesMRJob = false;\n- if ( et == ExecType.CP || et == ExecType.SPARK ){\n+ if ( et == ExecType.CP || et == ExecType.SPARK || et == ExecType.GPU ){\nlps.addCompatibility(JobType.INVALID);\nlps.setProperties( inputs, et, ExecLocation.ControlProgram, breaksAlignment, aligner, definesMRJob );\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java",
"diff": "@@ -26,6 +26,7 @@ import org.apache.sysml.runtime.instructions.gpu.ArithmeticBinaryGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.BuiltinUnaryGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.ConvolutionGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\n+import org.apache.sysml.runtime.instructions.gpu.MatrixMatrixAxpyGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction.GPUINSTRUCTION_TYPE;\nimport org.apache.sysml.runtime.instructions.gpu.MMTSJGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.ReorgGPUInstruction;\n@@ -121,6 +122,10 @@ public class GPUInstructionParser extends InstructionParser\nreturn ReorgGPUInstruction.parseInstruction(str);\ncase ArithmeticBinary:\n+ String opcode = InstructionUtils.getOpCode(str);\n+ if( opcode.equals(\"+*\") || opcode.equals(\"-*\") )\n+ return MatrixMatrixAxpyGPUInstruction.parseInstruction(str);\n+ else\nreturn ArithmeticBinaryGPUInstruction.parseInstruction(str);\ndefault:\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixMatrixAxpyGPUInstruction.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.gpu;\n+\n+import org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\n+import org.apache.sysml.runtime.matrix.operators.Operator;\n+import org.apache.sysml.utils.Statistics;\n+\n+public class MatrixMatrixAxpyGPUInstruction extends ArithmeticBinaryGPUInstruction\n+{\n+\n+ CPOperand constant = null;\n+ int multiplier = 1;\n+ public MatrixMatrixAxpyGPUInstruction(Operator op,\n+ CPOperand in1,\n+ CPOperand constant,\n+ int multiplier,\n+ CPOperand in2,\n+ CPOperand out,\n+ String opcode,\n+ String istr){\n+ super(op, in1, in2, out, opcode, istr);\n+ this.constant = constant;\n+ this.multiplier = multiplier;\n+ }\n+\n+ public static MatrixMatrixAxpyGPUInstruction parseInstruction ( String str ) throws DMLRuntimeException {\n+ String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n+ InstructionUtils.checkNumFields ( parts, 4 );\n+\n+ String opcode = parts[0];\n+ int multiplier = 1;\n+ if(opcode.equals(\"-*\"))\n+ multiplier = -1;\n+ CPOperand in1 = new CPOperand(parts[1]);\n+ CPOperand constant = new CPOperand(parts[2]);\n+ if(constant.getDataType() != DataType.SCALAR)\n+ throw new DMLRuntimeException(\"Expected second operand to be a scalar\");\n+ CPOperand in2 = new CPOperand(parts[3]);\n+ CPOperand out = new CPOperand(parts[4]);\n+\n+ DataType dt1 = in1.getDataType();\n+ DataType dt2 = in2.getDataType();\n+ DataType dt3 = out.getDataType();\n+\n+ Operator operator = (dt1 != dt2) ?\n+ InstructionUtils.parseScalarBinaryOperator(opcode, (dt1 == DataType.SCALAR)) :\n+ InstructionUtils.parseBinaryOperator(opcode);\n+\n+ if(dt1 == DataType.MATRIX && dt2 == DataType.MATRIX && dt3 == DataType.MATRIX) {\n+ return new MatrixMatrixAxpyGPUInstruction(operator, in1, constant, multiplier, in2, out, opcode, str);\n+ }\n+ else if( dt3 == DataType.MATRIX && ((dt1 == DataType.SCALAR && dt2 == DataType.MATRIX) || (dt1 == DataType.MATRIX && dt2 == DataType.SCALAR)) ) {\n+ throw new DMLRuntimeException(\"Unsupported GPU PlusMult/MinusMult ArithmeticInstruction.\");\n+ // return new ScalarMatrixArithmeticGPUInstruction(operator, in1, in2, out, opcode, str);\n+ }\n+ else\n+ throw new DMLRuntimeException(\"Unsupported GPU ArithmeticInstruction.\");\n+ }\n+\n+\n+ @Override\n+ 
public void processInstruction(ExecutionContext ec) throws DMLRuntimeException {\n+ Statistics.incrementNoOfExecutedGPUInst();\n+\n+ MatrixObject in1 = ec.getMatrixInputForGPUInstruction(_input1.getName());\n+ MatrixObject in2 = ec.getMatrixInputForGPUInstruction(_input2.getName());\n+ ScalarObject scalar = ec.getScalarInput(constant.getName(), constant.getValueType(), constant.isLiteral());\n+\n+ long rlen1 = in1.getNumRows();\n+ long clen1 = in1.getNumColumns();\n+ long rlen2 = in2.getNumRows();\n+ long clen2 = in2.getNumColumns();\n+ if (rlen1 != rlen2 || clen1 != clen2){\n+ // TODO: We donot support matrix-vector axpy operation\n+ throw new DMLRuntimeException(\"The dimensions of inputs in GPU axpy operation should match:\"+\n+ rlen1 + \" != \" + rlen2 + \" || \" + clen1 + \" != \" + clen2);\n+ }\n+\n+ ec.setMetaData(_output.getName(), (int)rlen1, (int)clen1);\n+\n+ LibMatrixCUDA.axpy(ec, in1, in2, _output.getName(), multiplier*scalar.getDoubleValue());\n+\n+ ec.releaseMatrixInputForGPUInstruction(_input1.getName());\n+ ec.releaseMatrixInputForGPUInstruction(_input2.getName());\n+ ec.releaseMatrixOutputForGPUInstruction(_output.getName());\n+ }\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -56,6 +56,7 @@ import static jcuda.runtime.JCuda.cudaMalloc;\nimport static jcuda.runtime.JCuda.cudaMemcpy;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\n+import static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.jcudnn.cudnnActivationMode.CUDNN_ACTIVATION_RELU;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n@@ -1723,6 +1724,37 @@ public class LibMatrixCUDA {\nsrc, dest, rlen, clen);\n}\n+ /**\n+ * Performs daxpy operation\n+ *\n+ * @param ec\n+ * @param in1\n+ * @param in2\n+ * @param outputName\n+ * @param constant\n+ * @throws DMLRuntimeException\n+ */\n+ public static void axpy(ExecutionContext ec, MatrixObject in1, MatrixObject in2,\n+ String outputName, double constant) throws DMLRuntimeException {\n+ if(isInSparseFormat(in1))\n+ ((JCudaObject)in1.getGPUObject()).sparseToDense();\n+ if(isInSparseFormat(in2))\n+ ((JCudaObject)in2.getGPUObject()).sparseToDense();\n+ Pointer A = ((JCudaObject)in1.getGPUObject()).jcudaDenseMatrixPtr;\n+ Pointer B = ((JCudaObject)in2.getGPUObject()).jcudaDenseMatrixPtr;\n+ MatrixObject out = ec.getMatrixObject(outputName);\n+ ec.getDenseMatrixOutputForGPUInstruction(outputName); // Allocated the dense output matrix\n+ Pointer C = ((JCudaObject)out.getGPUObject()).jcudaDenseMatrixPtr;\n+ Pointer alphaPtr = pointerTo(constant);\n+ long n = (in1.getNumRows()*in1.getNumColumns());\n+ // C <- A + alpha*B\n+ // becomes\n+ // C <- A\n+ // C <- alpha*B + C\n+ cudaMemcpy(C, A, n*((long)jcuda.Sizeof.DOUBLE), cudaMemcpyDeviceToDevice);\n+ JCublas2.cublasDaxpy(cublasHandle, (int) n, alphaPtr, B, 1, C, 1);\n+ }\n+\n/**\n* Performs elementwise operation specified by op of two input matrices in1 and in2\n*\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFuseBinaryOpChainTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFuseBinaryOpChainTest.java",
"diff": "@@ -153,6 +153,7 @@ public class RewriteFuseBinaryOpChainTest extends AutomatedTestBase\n* @param rewrites\n* @param instType\n*/\n+ @SuppressWarnings(\"unused\")\nprivate void testFuseBinaryChain( String testname, boolean rewrites, ExecType instType )\n{\nRUNTIME_PLATFORM platformOld = rtplatform;\n@@ -191,7 +192,12 @@ public class RewriteFuseBinaryOpChainTest extends AutomatedTestBase\n//check for applies rewrites\nif( rewrites && instType!=ExecType.MR ) {\n- String prefix = (instType==ExecType.SPARK) ? Instruction.SP_INST_PREFIX : \"\";\n+ String prefix = \"\";\n+ if((instType == ExecType.SPARK || instType==ExecType.CP) && AutomatedTestBase.TEST_GPU)\n+ prefix = Instruction.GPU_INST_PREFIX;\n+ else if(instType == ExecType.SPARK)\n+ prefix = Instruction.SP_INST_PREFIX;\n+\nString opcode = (testname.equals(TEST_NAME1)||testname.equals(TEST_NAME3)) ? prefix+\"+*\" : prefix+\"-*\";\nAssert.assertTrue(\"Rewrite not applied.\",Statistics.getCPHeavyHitterOpCodes().contains(opcode));\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-446] Add support for cublas daxpy operation
Closes #330. |